[llvm] [RISCV][GISEL] instruction-select for G_SPLAT_VECTOR (PR #111193)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 7 07:08:38 PDT 2024


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/111193

From 85cf231ef1eaa29c59637166bbd1e162f252a657 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 1 Oct 2024 15:33:05 -0700
Subject: [PATCH 1/6] [RISCV][GISEL] instruction-select for G_SPLAT_VECTOR

SelectionDAG also lowers ISD::SPLAT_VECTOR in a pre-instruction-select step.

By taking this approach, we allow the generic combiner to operate on
G_SPLAT_VECTOR instead of RISC-V-specific generic opcodes.
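
For clarity, here is a rough sketch of the intermediate MIR we expect
preISelLower to produce for the first RV64 test case added below. The
G_VMV_V_X_VL form only exists between preISelLower and selectImpl and is never
printed by the tests, so the exact operand order and virtual-register numbers
are illustrative (based on reading the new preISelLower code), not llc output:

    ; Input MIR (splat_zero_nxv1i8 in splatvector-rv64.mir):
    %3:gprb(s32) = G_CONSTANT i32 0
    %2:gprb(s64) = G_ANYEXT %3(s32)
    %0:vrb(<vscale x 1 x s8>) = G_SPLAT_VECTOR %2(s64)

    ; After preISelLower (sketch): the splat is rewritten in place into the
    ; VL-based generic opcode with an undef passthru and a VL of -1 (VLMAX),
    ; so renderVLOp can still see the G_CONSTANT when selectImpl runs.
    %4:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
    %5:gprb(s64) = G_CONSTANT i64 -1
    %0:vrb(<vscale x 1 x s8>) = G_VMV_V_X_VL %4, %2(s64), %5

selectImpl then matches the G_VMV_V_X_VL to PseudoVMV_V_X_MF8 (as checked in
splatvector-rv64.mir), and the passthru and VL definitions are selected
explicitly afterwards; see the FIXME in RISCVInstructionSelector::select.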
---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  |  39 ++
 llvm/lib/Target/RISCV/RISCVInstrGISel.td      |  16 +
 .../rvv/splatvector-rv32.mir                  | 600 +++++++++++++++++
 .../rvv/splatvector-rv64.mir                  | 611 ++++++++++++++++++
 4 files changed, 1266 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index cfe8644b892298..eb3152ad768890 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -568,6 +568,18 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return true;
   }
 
+  // FIXME: We create an IMPLICIT_DEF and a G_CONSTANT in preISelLower when
+  // we encounter a G_SPLAT_VECTOR. We cannot select the G_CONSTANT until after
+  // the MI is lowered, since renderVLOp needs to see the G_CONSTANT. It would
+  // be nice if the InstructionSelector selected these instructions without
+  // needing to call select on them explicitly.
+  if (Opc == RISCV::G_VMV_V_X_VL || Opc == RISCV::G_VFMV_V_F_VL) {
+    MachineInstr *Passthru = MRI->getVRegDef(MI.getOperand(1).getReg());
+    MachineInstr *VL = MRI->getVRegDef(MI.getOperand(3).getReg());
+    if (selectImpl(MI, *CoverageInfo))
+      return select(*Passthru) && select(*VL);
+  }
+
   if (selectImpl(MI, *CoverageInfo))
     return true;
 
@@ -800,6 +812,33 @@ void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
     replacePtrWithInt(MI.getOperand(1), MIB);
     MI.setDesc(TII.get(TargetOpcode::G_AND));
     MRI->setType(DstReg, sXLen);
+    break;
+  }
+  case TargetOpcode::G_SPLAT_VECTOR: {
+    // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
+    // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
+    Register Scalar = MI.getOperand(1).getReg();
+    bool IsGPRSplat = isRegInGprb(Scalar);
+    const LLT sXLen = LLT::scalar(STI.getXLen());
+    if (IsGPRSplat && TypeSize::isKnownLT(MRI->getType(Scalar).getSizeInBits(),
+                                          sXLen.getSizeInBits()))
+      Scalar = MIB.buildAnyExt(sXLen, Scalar).getReg(0);
+
+    // Convert MI in place, since the select function is trying to select
+    // this instruction.
+    unsigned Opc = IsGPRSplat ? RISCV::G_VMV_V_X_VL : RISCV::G_VFMV_V_F_VL;
+    MI.setDesc(TII.get(Opc));
+    MI.removeOperand(1);
+    LLT VecTy = MRI->getType(MI.getOperand(0).getReg());
+    auto Passthru = MIB.buildUndef(VecTy);
+    auto VLMax = MIB.buildConstant(sXLen, -1);
+    MRI->setRegBank(Passthru.getReg(0), RBI.getRegBank(RISCV::VRBRegBankID));
+    MRI->setRegBank(VLMax.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
+    MachineInstrBuilder(*MI.getMF(), &MI)
+        .addUse(Passthru.getReg(0))
+        .addUse(Scalar)
+        .addUse(VLMax.getReg(0));
+    break;
   }
   }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index f6bf74c565ab38..b26751a3c8e8e5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -49,6 +49,22 @@ def G_VMSET_VL : RISCVGenericInstruction {
 }
 def : GINodeEquiv<G_VMSET_VL, riscv_vmset_vl>;
 
+// Pseudo equivalent to a RISCVISD::VMV_V_X_VL
+def G_VMV_V_X_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMV_V_X_VL, riscv_vmv_v_x_vl>;
+
+// Pseudo equivalent to a RISCVISD::VFMV_V_F_VL
+def G_VFMV_V_F_VL : RISCVGenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins type1:$src);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VFMV_V_F_VL, riscv_vfmv_v_f_vl>;
+
 // Pseudo equivalent to a RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL. There is no
 // record to mark as equivalent to using GINodeEquiv because it gets lowered
 // before instruction selection.
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir
new file mode 100644
index 00000000000000..4066c4c36a8d53
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir
@@ -0,0 +1,600 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v,+m -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            splat_zero_nxv1i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF8_:%[0-9]+]]:vr = PseudoVMV_V_X_MF8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 1 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 2 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 4 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 8 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv16i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 16 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv32i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 32 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8m4 = COPY %0(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv64i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 64 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8m8 = COPY %0(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 1 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 2 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 4 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 8 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv16i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 16 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv32i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 32 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %0:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M1_:%[0-9]+]]:vr = PseudoVFMV_V_FPR64_M1 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVFMV_V_FPR64_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %2:gprb(s32) = G_CONSTANT i32 0
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %1:fprb(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M2_:%[0-9]+]]:vrm2 = PseudoVFMV_V_FPR64_M2 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVFMV_V_FPR64_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %2:gprb(s32) = G_CONSTANT i32 0
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %1:fprb(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M4_:%[0-9]+]]:vrm4 = PseudoVFMV_V_FPR64_M4 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVFMV_V_FPR64_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %2:gprb(s32) = G_CONSTANT i32 0
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %1:fprb(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M8_:%[0-9]+]]:vrm8 = PseudoVFMV_V_FPR64_M8 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVFMV_V_FPR64_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %2:gprb(s32) = G_CONSTANT i32 0
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %1:fprb(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %2:gprb(s32) = COPY %1(s32)
+    %0:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR %2(s32)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %2:gprb(s32) = COPY %1(s32)
+    %0:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR %2(s32)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %2:gprb(s32) = COPY %1(s32)
+    %0:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR %2(s32)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %2:gprb(s32) = COPY %1(s32)
+    %0:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR %2(s32)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %2:gprb(s32) = COPY %1(s32)
+    %0:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR %2(s32)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M1_:%[0-9]+]]:vr = PseudoVFMV_V_FPR64_M1 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVFMV_V_FPR64_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %0:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M2_:%[0-9]+]]:vrm2 = PseudoVFMV_V_FPR64_M2 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVFMV_V_FPR64_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %0:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M4_:%[0-9]+]]:vrm4 = PseudoVFMV_V_FPR64_M4 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVFMV_V_FPR64_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %0:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M8_:%[0-9]+]]:vrm8 = PseudoVFMV_V_FPR64_M8 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVFMV_V_FPR64_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %0:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir
new file mode 100644
index 00000000000000..09c6c3b65d7c05
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir
@@ -0,0 +1,611 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -mattr=+v,+m -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            splat_zero_nxv1i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF8_:%[0-9]+]]:vr = PseudoVMV_V_X_MF8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 1 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 2 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 4 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 8 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv16i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 16 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv32i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 32 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv64i8
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv64i8
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 64 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 1 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 2 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 4 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 8 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv16i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 16 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv32i16
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i16
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 32 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %1(s32)
+    %0:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %1(s32)
+    %0:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %1(s32)
+    %0:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %1(s32)
+    %0:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16i32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:gprb(s32) = G_CONSTANT i32 0
+    %2:gprb(s64) = G_ANYEXT %1(s32)
+    %0:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:gprb(s64) = G_CONSTANT i64 0
+    %0:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:gprb(s64) = G_CONSTANT i64 0
+    %0:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:gprb(s64) = G_CONSTANT i64 0
+    %0:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8i64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:gprb(s64) = G_CONSTANT i64 0
+    %0:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %3:gprb(s32) = COPY %1(s32)
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %3:gprb(s32) = COPY %1(s32)
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %3:gprb(s32) = COPY %1(s32)
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %3:gprb(s32) = COPY %1(s32)
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16f32
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16f32
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    %3:gprb(s32) = COPY %1(s32)
+    %2:gprb(s64) = G_ANYEXT %3(s32)
+    %0:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %2:gprb(s64) = COPY %1(s64)
+    %0:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %2:gprb(s64) = COPY %1(s64)
+    %0:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %2:gprb(s64) = COPY %1(s64)
+    %0:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8f64
+legalized:       true
+regBankSelected: true
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f64
+    ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
+    ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    %2:gprb(s64) = COPY %1(s64)
+    %0:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...

From 6a91a29a6e5d165fce2a8c0afb7341176fc97aeb Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 4 Oct 2024 11:47:59 -0700
Subject: [PATCH 2/6] fixup! add end-to-end tests

---
 .../RISCV/GlobalISel/rvv/splat-vector.ll      | 427 +++++++++++++++++-
 1 file changed, 424 insertions(+), 3 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll
index 4e58c4dcec2d85..1381c2f7a2bee3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll
@@ -67,7 +67,428 @@ define <vscale x 64 x i1> @splat_zero_nxv64i1() {
   ret <vscale x 64 x i1> zeroinitializer
 }
 
+define <vscale x 1 x i8> @splat_zero_nxv1i8() {
+; CHECK-LABEL: splat_zero_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 1 x i8> zeroinitializer
+}
+
+define <vscale x 2 x i8> @splat_zero_nxv2i8() {
+; CHECK-LABEL: splat_zero_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 2 x i8> zeroinitializer
+}
+
+define <vscale x 4 x i8> @splat_zero_nxv4i8() {
+; CHECK-LABEL: splat_zero_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 4 x i8> zeroinitializer
+}
+
+define <vscale x 8 x i8> @splat_zero_nxv8i8() {
+; CHECK-LABEL: splat_zero_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 8 x i8> zeroinitializer
+}
+
+define <vscale x 16 x i8> @splat_zero_nxv16i8() {
+; CHECK-LABEL: splat_zero_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 16 x i8> zeroinitializer
+}
+
+define <vscale x 32 x i8> @splat_zero_nxv32i8() {
+; CHECK-LABEL: splat_zero_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 32 x i8> zeroinitializer
+}
+
+define <vscale x 64 x i8> @splat_zero_nxv64i8() {
+; CHECK-LABEL: splat_zero_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 64 x i8> zeroinitializer
+}
+
+define <vscale x 1 x i16> @splat_zero_nxv1i16() {
+; CHECK-LABEL: splat_zero_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 1 x i16> zeroinitializer
+}
+
+define <vscale x 2 x i16> @splat_zero_nxv2i16() {
+; CHECK-LABEL: splat_zero_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 2 x i16> zeroinitializer
+}
+
+define <vscale x 4 x i16> @splat_zero_nxv4i16() {
+; CHECK-LABEL: splat_zero_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 4 x i16> zeroinitializer
+}
+
+define <vscale x 8 x i16> @splat_zero_nxv8i16() {
+; CHECK-LABEL: splat_zero_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 8 x i16> zeroinitializer
+}
+
+define <vscale x 16 x i16> @splat_zero_nxv16i16() {
+; CHECK-LABEL: splat_zero_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 16 x i16> zeroinitializer
+}
+
+define <vscale x 32 x i16> @splat_zero_nxv32i16() {
+; CHECK-LABEL: splat_zero_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 32 x i16> zeroinitializer
+}
+
+define <vscale x 1 x i32> @splat_zero_nxv1i32() {
+; CHECK-LABEL: splat_zero_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 1 x i32> zeroinitializer
+}
+
+define <vscale x 2 x i32> @splat_zero_nxv2i32() {
+; CHECK-LABEL: splat_zero_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 2 x i32> zeroinitializer
+}
+
+define <vscale x 4 x i32> @splat_zero_nxv4i32() {
+; CHECK-LABEL: splat_zero_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 4 x i32> zeroinitializer
+}
+
+define <vscale x 8 x i32> @splat_zero_nxv8i32() {
+; CHECK-LABEL: splat_zero_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 8 x i32> zeroinitializer
+}
+
+define <vscale x 16 x i32> @splat_zero_nxv16i32() {
+; CHECK-LABEL: splat_zero_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, zero
+; CHECK-NEXT:    ret
+  ret <vscale x 16 x i32> zeroinitializer
+}
+
+define <vscale x 1 x i64> @splat_zero_nxv1i64() {
+; RV32-LABEL: splat_zero_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, zero
+; RV64-NEXT:    ret
+  ret <vscale x 1 x i64> zeroinitializer
+}
+
+define <vscale x 2 x i64> @splat_zero_nxv2i64() {
+; RV32-LABEL: splat_zero_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, zero
+; RV64-NEXT:    ret
+  ret <vscale x 2 x i64> zeroinitializer
+}
+
+define <vscale x 4 x i64> @splat_zero_nxv4i64() {
+; RV32-LABEL: splat_zero_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT:    vmv.v.x v8, zero
+; RV64-NEXT:    ret
+  ret <vscale x 4 x i64> zeroinitializer
+}
+
+define <vscale x 8 x i64> @splat_zero_nxv8i64() {
+; RV32-LABEL: splat_zero_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v8, zero
+; RV64-NEXT:    ret
+  ret <vscale x 8 x i64> zeroinitializer
+}
+
+define <vscale x 1 x float> @splat_zero_nxv1f32() {
+; RV32-LABEL: splat_zero_nxv1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fmv.w.x fa5, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.w.x fa5, zero
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  ret <vscale x 1 x float> zeroinitializer
+}
+
+define <vscale x 2 x float> @splat_zero_nxv2f32() {
+; RV32-LABEL: splat_zero_nxv2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fmv.w.x fa5, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.w.x fa5, zero
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  ret <vscale x 2 x float> zeroinitializer
+}
+
+define <vscale x 4 x float> @splat_zero_nxv4f32() {
+; RV32-LABEL: splat_zero_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fmv.w.x fa5, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.w.x fa5, zero
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  ret <vscale x 4 x float> zeroinitializer
+}
+
+define <vscale x 8 x float> @splat_zero_nxv8f32() {
+; RV32-LABEL: splat_zero_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fmv.w.x fa5, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.w.x fa5, zero
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  ret <vscale x 8 x float> zeroinitializer
+}
+
+define <vscale x 16 x float> @splat_zero_nxv16f32() {
+; RV32-LABEL: splat_zero_nxv16f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fmv.w.x fa5, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv16f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.w.x fa5, zero
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  ret <vscale x 16 x float> zeroinitializer
+}
+
+define <vscale x 1 x double> @splat_zero_nxv1f64() {
+; RV32-LABEL: splat_zero_nxv1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  ret <vscale x 1 x double> zeroinitializer
+}
+
+define <vscale x 2 x double> @splat_zero_nxv2f64() {
+; RV32-LABEL: splat_zero_nxv2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  ret <vscale x 2 x double> zeroinitializer
+}
+
+define <vscale x 4 x double> @splat_zero_nxv4f64() {
+; RV32-LABEL: splat_zero_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  ret <vscale x 4 x double> zeroinitializer
+}
+
+define <vscale x 8 x double> @splat_zero_nxv8f64() {
+; RV32-LABEL: splat_zero_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_zero_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x fa5, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  ret <vscale x 8 x double> zeroinitializer
+}
+
 
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; RV32: {{.*}}
-; RV64: {{.*}}

>From 5d7793293fd2e17ac7b03cd6d4626de93ee5f7dd Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 4 Oct 2024 11:54:45 -0700
Subject: [PATCH 3/6] fixup! only do this when we start with G_SPLAT_VECTOR

---
 llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index eb3152ad768890..630f0aa0f1141a 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -530,8 +530,9 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
   MachineFunction &MF = *MBB.getParent();
   MachineIRBuilder MIB(MI);
 
-  preISelLower(MI, MIB);
   const unsigned Opc = MI.getOpcode();
+  bool OpcWasGSplatVector = Opc == TargetOpcode::G_SPLAT_VECTOR;
+  preISelLower(MI, MIB);
 
   if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
     if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
@@ -573,7 +574,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
   // the MI is lowered, since renderVLOp needs to see the G_CONSTANT. It would
   // be nice if the InstructionSelector selected these instructions without
   // needing to call select on them explicitly.
-  if (Opc == RISCV::G_VMV_V_X_VL || Opc == RISCV::G_VFMV_V_F_VL) {
+  if (OpcWasGSplatVector) {
     MachineInstr *Passthru = MRI->getVRegDef(MI.getOperand(1).getReg());
     MachineInstr *VL = MRI->getVRegDef(MI.getOperand(3).getReg());
     if (selectImpl(MI, *CoverageInfo))

>From c5857129d259a1c2d859939e379f74fe9a694187 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 4 Oct 2024 13:21:48 -0700
Subject: [PATCH 4/6] fixup! opc needs to be loaded after preISelLower

---
 llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 630f0aa0f1141a..de359384e4debe 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -530,9 +530,9 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
   MachineFunction &MF = *MBB.getParent();
   MachineIRBuilder MIB(MI);
 
-  const unsigned Opc = MI.getOpcode();
-  bool OpcWasGSplatVector = Opc == TargetOpcode::G_SPLAT_VECTOR;
+  bool OpcWasGSplatVector = MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR;
   preISelLower(MI, MIB);
+  const unsigned Opc = MI.getOpcode();
 
   if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
     if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
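
Taken together, the two fixups above leave the start of RISCVInstructionSelector::select() looking roughly like the sketch below (reconstructed from the hunks in this series, not verbatim source): whether we started from a G_SPLAT_VECTOR has to be recorded before preISelLower rewrites the instruction in place, and the opcode has to be re-read afterwards because the rewrite changes it.

      MachineIRBuilder MIB(MI);

      // Record this before preISelLower rewrites G_SPLAT_VECTOR in place to
      // G_VMV_V_X_VL / G_VFMV_V_F_VL.
      bool OpcWasGSplatVector = MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR;
      preISelLower(MI, MIB);
      // Re-read the opcode; preISelLower may have changed it.
      const unsigned Opc = MI.getOpcode();

      ...

      if (OpcWasGSplatVector) {
        // Explicitly select the defs of the passthru (operand 1) and VL
        // (operand 3) operands once the splat itself has been selected.
        MachineInstr *Passthru = MRI->getVRegDef(MI.getOperand(1).getReg());
        MachineInstr *VL = MRI->getVRegDef(MI.getOperand(3).getReg());
        if (selectImpl(MI, *CoverageInfo))
          return select(*Passthru) && select(*VL);
      }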

>From 7ac9ecba4900a4f0e8c4ded2cda7157093be3844 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 4 Oct 2024 13:49:08 -0700
Subject: [PATCH 5/6] fixup! add more tests

---
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |   10 +-
 .../rvv/splatvector-rv32.mir                  |   31 +
 .../rvv/splatvector-rv64.mir                  |   31 +
 .../RISCV/GlobalISel/rvv/splat-vector.ll      | 1128 +++++++++++++++++
 4 files changed, 1198 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 932287d1403021..065f6c9165a133 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3303,8 +3303,14 @@ bool IRTranslator::translateShuffleVector(const User &U,
   // poison are treated as zeroinitializer here).
   if (U.getOperand(0)->getType()->isScalableTy()) {
     Register Val = getOrCreateVReg(*U.getOperand(0));
-    auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
-        MRI->getType(Val).getElementType(), Val, 0);
+    // We don't use buildExtractVectorElementConstant because the constant it
+    // builds ends up in a different basic block, which creates problems for
+    // CSE.
+    unsigned VecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
+    auto *IdxCI = ConstantInt::get(U.getContext(), APInt(VecIdxWidth, 0));
+    Register Idx = getOrCreateVReg(*IdxCI);
+    auto SplatVal = MIRBuilder.buildExtractVectorElement(
+        MRI->getType(Val).getElementType(), Val, Idx);
     MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
     return true;
   }
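
For reference, the path changed above handles scalable-vector splats expressed as a shufflevector with an all-zero mask, as in the illustrative IR below (a made-up example, not one of the tests added in this patch); element 0 of %v is extracted using the constant index built here and then splatted via G_SPLAT_VECTOR:

      define <vscale x 4 x i32> @splat_first_lane(<vscale x 4 x i32> %v) {
        %splat = shufflevector <vscale x 4 x i32> %v, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
        ret <vscale x 4 x i32> %splat
      }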
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir
index 4066c4c36a8d53..d5eb80e4191c0a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv32.mir
@@ -10,6 +10,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv1i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF8_:%[0-9]+]]:vr = PseudoVMV_V_X_MF8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -28,6 +29,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -46,6 +48,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -64,6 +67,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -82,6 +86,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv16i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -100,6 +105,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv32i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -118,6 +124,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv64i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -136,6 +143,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv1i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -154,6 +162,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -172,6 +181,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -190,6 +200,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv8i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -208,6 +219,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv16i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -226,6 +238,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv32i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -244,6 +257,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv1i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -262,6 +276,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -280,6 +295,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv4i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -298,6 +314,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv8i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -316,6 +333,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv16i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -336,6 +354,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M1_:%[0-9]+]]:vr = PseudoVFMV_V_FPR64_M1 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVFMV_V_FPR64_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -358,6 +377,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M2_:%[0-9]+]]:vrm2 = PseudoVFMV_V_FPR64_M2 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVFMV_V_FPR64_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -380,6 +400,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M4_:%[0-9]+]]:vrm4 = PseudoVFMV_V_FPR64_M4 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVFMV_V_FPR64_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -402,6 +423,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M8_:%[0-9]+]]:vrm8 = PseudoVFMV_V_FPR64_M8 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVFMV_V_FPR64_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -424,6 +446,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -445,6 +468,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -466,6 +490,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -487,6 +512,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -508,6 +534,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -529,6 +556,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M1_:%[0-9]+]]:vr = PseudoVFMV_V_FPR64_M1 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVFMV_V_FPR64_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -549,6 +577,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M2_:%[0-9]+]]:vrm2 = PseudoVFMV_V_FPR64_M2 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVFMV_V_FPR64_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -569,6 +598,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M4_:%[0-9]+]]:vrm4 = PseudoVFMV_V_FPR64_M4 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVFMV_V_FPR64_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -589,6 +619,7 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[BuildPairF64Pseudo:%[0-9]+]]:fpr64 = BuildPairF64Pseudo [[COPY1]], [[COPY]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVFMV_V_FPR64_M8_:%[0-9]+]]:vrm8 = PseudoVFMV_V_FPR64_M8 [[DEF]], [[BuildPairF64Pseudo]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVFMV_V_FPR64_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir
index 09c6c3b65d7c05..871f78ae6c8a0c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/splatvector-rv64.mir
@@ -10,6 +10,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv1i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF8_:%[0-9]+]]:vr = PseudoVMV_V_X_MF8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -29,6 +30,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv2i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -48,6 +50,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv4i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -67,6 +70,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv8i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -86,6 +90,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv16i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -105,6 +110,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv32i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -124,6 +130,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv64i8
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 3 /* e8 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -143,6 +150,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv1i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF4_:%[0-9]+]]:vr = PseudoVMV_V_X_MF4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -162,6 +170,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv2i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -181,6 +190,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv4i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -200,6 +210,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv8i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -219,6 +230,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv16i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -238,6 +250,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv32i16
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 4 /* e16 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -257,6 +270,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv1i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -276,6 +290,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv2i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -295,6 +310,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv4i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -314,6 +330,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv8i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -333,6 +350,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv16i32
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -352,6 +370,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv1i64
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -370,6 +389,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv2i64
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -388,6 +408,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv4i64
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -406,6 +427,7 @@ body:             |
     ; CHECK-LABEL: name: splat_zero_nxv8i64
     ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x0
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -426,6 +448,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_MF2_:%[0-9]+]]:vr = PseudoVMV_V_X_MF2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_MF2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -448,6 +471,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -470,6 +494,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -492,6 +517,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -514,6 +540,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_W_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY1]], -1, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
@@ -536,6 +563,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M1_:%[0-9]+]]:vr = PseudoVMV_V_X_M1 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY [[PseudoVMV_V_X_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8
@@ -557,6 +585,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M2_:%[0-9]+]]:vrm2 = PseudoVMV_V_X_M2 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m2 = COPY [[PseudoVMV_V_X_M2_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m2
@@ -578,6 +607,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M4_:%[0-9]+]]:vrm4 = PseudoVMV_V_X_M4 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m4 = COPY [[PseudoVMV_V_X_M4_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m4
@@ -599,6 +629,7 @@ body:             |
     ; CHECK-NEXT: [[FMV_D_X:%[0-9]+]]:fpr64 = FMV_D_X [[COPY]]
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY [[FMV_D_X]]
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, -1
     ; CHECK-NEXT: [[PseudoVMV_V_X_M8_:%[0-9]+]]:vrm8 = PseudoVMV_V_X_M8 [[DEF]], [[COPY1]], -1, 6 /* e64 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8m8 = COPY [[PseudoVMV_V_X_M8_]]
     ; CHECK-NEXT: PseudoRET implicit $v8m8
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll
index 1381c2f7a2bee3..175d0cdfeb6fc7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/splat-vector.ll
@@ -491,4 +491,1132 @@ define <vscale x 8 x double> @splat_zero_nxv8f64() {
   ret <vscale x 8 x double> zeroinitializer
 }
 
+define <vscale x 1 x i1> @splat_const_nxv1i1() {
+; CHECK-LABEL: splat_const_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %ins, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i1> %splat
+}
+
+define <vscale x 2 x i1> @splat_const_nxv2i1() {
+; CHECK-LABEL: splat_const_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %ins, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i1> %splat
+}
+
+define <vscale x 4 x i1> @splat_const_nxv4i1() {
+; CHECK-LABEL: splat_const_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %ins, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i1> %splat
+}
+
+define <vscale x 8 x i1> @splat_const_nxv8i1() {
+; CHECK-LABEL: splat_const_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %ins, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i1> %splat
+}
+
+define <vscale x 16 x i1> @splat_const_nxv16i1() {
+; CHECK-LABEL: splat_const_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %ins, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i1> %splat
+}
+
+define <vscale x 32 x i1> @splat_const_nxv32i1() {
+; CHECK-LABEL: splat_const_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 32 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 32 x i1> %ins, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  ret <vscale x 32 x i1> %splat
+}
+
+define <vscale x 64 x i1> @splat_const_nxv64i1() {
+; CHECK-LABEL: splat_const_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 64 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 64 x i1> %ins, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+  ret <vscale x 64 x i1> %splat
+}
+
+define <vscale x 1 x i8> @splat_const_nxv1i8() {
+; CHECK-LABEL: splat_const_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i8> poison, i8 1, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %ins, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i8> %splat
+}
+
+define <vscale x 2 x i8> @splat_const_nxv2i8() {
+; CHECK-LABEL: splat_const_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i8> poison, i8 1, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %ins, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i8> %splat
+}
+
+define <vscale x 4 x i8> @splat_const_nxv4i8() {
+; CHECK-LABEL: splat_const_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i8> poison, i8 1, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %ins, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i8> %splat
+}
+
+define <vscale x 8 x i8> @splat_const_nxv8i8() {
+; CHECK-LABEL: splat_const_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i8> poison, i8 1, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %ins, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i8> %splat
+}
+
+define <vscale x 16 x i8> @splat_const_nxv16i8() {
+; CHECK-LABEL: splat_const_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i8> poison, i8 1, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i8> %splat
+}
+
+define <vscale x 32 x i8> @splat_const_nxv32i8() {
+; CHECK-LABEL: splat_const_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 32 x i8> poison, i8 1, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %ins, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+  ret <vscale x 32 x i8> %splat
+}
+
+define <vscale x 64 x i8> @splat_const_nxv64i8() {
+; CHECK-LABEL: splat_const_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 64 x i8> poison, i8 1, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %ins, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+  ret <vscale x 64 x i8> %splat
+}
+
+define <vscale x 1 x i16> @splat_const_nxv1i16() {
+; CHECK-LABEL: splat_const_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %ins, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i16> %splat
+}
+
+define <vscale x 2 x i16> @splat_const_nxv2i16() {
+; CHECK-LABEL: splat_const_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i16> %splat
+}
+
+define <vscale x 4 x i16> @splat_const_nxv4i16() {
+; CHECK-LABEL: splat_const_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i16> %splat
+}
+
+define <vscale x 8 x i16> @splat_const_nxv8i16() {
+; CHECK-LABEL: splat_const_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i16> %splat
+}
+
+define <vscale x 16 x i16> @splat_const_nxv16i16() {
+; CHECK-LABEL: splat_const_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %ins, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i16> %splat
+}
+
+define <vscale x 32 x i16> @splat_const_nxv32i16() {
+; CHECK-LABEL: splat_const_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 32 x i16> poison, i16 1, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %ins, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+  ret <vscale x 32 x i16> %splat
+}
+
+define <vscale x 1 x i32> @splat_const_nxv1i32() {
+; CHECK-LABEL: splat_const_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i32> poison, i32 1, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %ins, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i32> %splat
+}
+
+define <vscale x 2 x i32> @splat_const_nxv2i32() {
+; CHECK-LABEL: splat_const_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i32> poison, i32 1, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i32> %splat
+}
+
+define <vscale x 4 x i32> @splat_const_nxv4i32() {
+; CHECK-LABEL: splat_const_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i32> poison, i32 1, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i32> %splat
+}
+
+define <vscale x 8 x i32> @splat_const_nxv8i32() {
+; CHECK-LABEL: splat_const_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i32> poison, i32 1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %ins, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i32> %splat
+}
+
+define <vscale x 16 x i32> @splat_const_nxv16i32() {
+; CHECK-LABEL: splat_const_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i32> poison, i32 1, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %ins, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i32> %splat
+}
+
+define <vscale x 1 x i64> @splat_const_nxv1i64() {
+; RV32-LABEL: splat_const_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, 1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i64> poison, i64 1, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %ins, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i64> %splat
+}
+
+define <vscale x 2 x i64> @splat_const_nxv2i64() {
+; RV32-LABEL: splat_const_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, 1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i64> poison, i64 1, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i64> %splat
+}
+
+define <vscale x 4 x i64> @splat_const_nxv4i64() {
+; RV32-LABEL: splat_const_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, 1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1
+; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i64> poison, i64 1, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %ins, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i64> %splat
+}
+
+define <vscale x 8 x i64> @splat_const_nxv8i64() {
+; RV32-LABEL: splat_const_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, 1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i64> poison, i64 1, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %ins, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i64> %splat
+}
+
+define <vscale x 1 x float> @splat_const_nxv1f32() {
+; RV32-LABEL: splat_const_nxv1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 260096
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 260096
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 1 x float> poison, float 1.0, i32 0
+  %splat = shufflevector <vscale x 1 x float> %ins, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x float> %splat
+}
+
+define <vscale x 2 x float> @splat_const_nxv2f32() {
+; RV32-LABEL: splat_const_nxv2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 260096
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 260096
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 2 x float> poison, float 1.0, i32 0
+  %splat = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x float> %splat
+}
+
+define <vscale x 4 x float> @splat_const_nxv4f32() {
+; RV32-LABEL: splat_const_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 260096
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 260096
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 4 x float> poison, float 1.0, i32 0
+  %splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x float> %splat
+}
+
+define <vscale x 8 x float> @splat_const_nxv8f32() {
+; RV32-LABEL: splat_const_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 260096
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 260096
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 8 x float> poison, float 1.0, i32 0
+  %splat = shufflevector <vscale x 8 x float> %ins, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x float> %splat
+}
+
+define <vscale x 16 x float> @splat_const_nxv16f32() {
+; RV32-LABEL: splat_const_nxv16f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 260096
+; RV32-NEXT:    fmv.w.x fa5, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv16f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 260096
+; RV64-NEXT:    fmv.w.x fa5, a0
+; RV64-NEXT:    fmv.x.w a0, fa5
+; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 16 x float> poison, float 1.0, i32 0
+  %splat = shufflevector <vscale x 16 x float> %ins, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x float> %splat
+}
+
+define <vscale x 1 x double> @splat_const_nxv1f64() {
+; RV32-LABEL: splat_const_nxv1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a0, 261888
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1023
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 1 x double> poison, double 1.0, i32 0
+  %splat = shufflevector <vscale x 1 x double> %ins, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x double> %splat
+}
+
+define <vscale x 2 x double> @splat_const_nxv2f64() {
+; RV32-LABEL: splat_const_nxv2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a0, 261888
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1023
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 2 x double> poison, double 1.0, i32 0
+  %splat = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x double> %splat
+}
+
+define <vscale x 4 x double> @splat_const_nxv4f64() {
+; RV32-LABEL: splat_const_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a0, 261888
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1023
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 4 x double> poison, double 1.0, i32 0
+  %splat = shufflevector <vscale x 4 x double> %ins, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x double> %splat
+}
+
+define <vscale x 8 x double> @splat_const_nxv8f64() {
+; RV32-LABEL: splat_const_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a0, 261888
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_const_nxv8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, 1023
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64-NEXT:    vfmv.v.f v8, fa5
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 8 x double> poison, double 1.0, i32 0
+  %splat = shufflevector <vscale x 8 x double> %ins, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x double> %splat
+}
+
+define <vscale x 1 x i1> @splat_nxv1i1(i1 %x) {
+; CHECK-LABEL: splat_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.v.x v9, zero
+; CHECK-NEXT:    vmsne.vv v0, v8, v9
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i1> poison, i1 %x, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %ins, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i1> %splat
+}
+
+define <vscale x 2 x i1> @splat_nxv2i1(i1 %x) {
+; CHECK-LABEL: splat_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.v.x v9, zero
+; CHECK-NEXT:    vmsne.vv v0, v8, v9
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i1> poison, i1 %x, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %ins, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i1> %splat
+}
+
+define <vscale x 4 x i1> @splat_nxv4i1(i1 %x) {
+; CHECK-LABEL: splat_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.v.x v9, zero
+; CHECK-NEXT:    vmsne.vv v0, v8, v9
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i1> poison, i1 %x, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %ins, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i1> %splat
+}
+
+define <vscale x 8 x i1> @splat_nxv8i1(i1 %x) {
+; CHECK-LABEL: splat_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i1> poison, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %ins, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i1> %splat
+}
+
+define <vscale x 16 x i1> @splat_nxv16i1(i1 %x) {
+; CHECK-LABEL: splat_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.v.x v10, zero
+; CHECK-NEXT:    vmsne.vv v0, v8, v10
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i1> poison, i1 %x, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %ins, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i1> %splat
+}
+
+define <vscale x 32 x i1> @splat_nxv32i1(i1 %x) {
+; CHECK-LABEL: splat_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.v.x v12, zero
+; CHECK-NEXT:    vmsne.vv v0, v8, v12
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 32 x i1> poison, i1 %x, i32 0
+  %splat = shufflevector <vscale x 32 x i1> %ins, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+  ret <vscale x 32 x i1> %splat
+}
+
+define <vscale x 64 x i1> @splat_nxv64i1(i1 %x) {
+; CHECK-LABEL: splat_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.v.x v16, zero
+; CHECK-NEXT:    vmsne.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 64 x i1> poison, i1 %x, i32 0
+  %splat = shufflevector <vscale x 64 x i1> %ins, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+  ret <vscale x 64 x i1> %splat
+}
+
+define <vscale x 1 x i8> @splat_nxv1i8(i8 %x) {
+; CHECK-LABEL: splat_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i8> poison, i8 %x, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %ins, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i8> %splat
+}
+
+define <vscale x 2 x i8> @splat_nxv2i8(i8 %x) {
+; CHECK-LABEL: splat_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i8> poison, i8 %x, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %ins, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i8> %splat
+}
+
+define <vscale x 4 x i8> @splat_nxv4i8(i8 %x) {
+; CHECK-LABEL: splat_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i8> poison, i8 %x, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %ins, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i8> %splat
+}
+
+define <vscale x 8 x i8> @splat_nxv8i8(i8 %x) {
+; CHECK-LABEL: splat_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i8> poison, i8 %x, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %ins, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i8> %splat
+}
+
+define <vscale x 16 x i8> @splat_nxv16i8(i8 %x) {
+; CHECK-LABEL: splat_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i8> poison, i8 %x, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i8> %splat
+}
+
+define <vscale x 32 x i8> @splat_nxv32i8(i8 %x) {
+; CHECK-LABEL: splat_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 32 x i8> poison, i8 %x, i32 0
+  %splat = shufflevector <vscale x 32 x i8> %ins, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+  ret <vscale x 32 x i8> %splat
+}
+
+define <vscale x 64 x i8> @splat_nxv64i8(i8 %x) {
+; CHECK-LABEL: splat_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 64 x i8> poison, i8 %x, i32 0
+  %splat = shufflevector <vscale x 64 x i8> %ins, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+  ret <vscale x 64 x i8> %splat
+}
+
+define <vscale x 1 x i16> @splat_nxv1i16(i16 %x) {
+; CHECK-LABEL: splat_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i16> poison, i16 %x, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %ins, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i16> %splat
+}
+
+define <vscale x 2 x i16> @splat_nxv2i16(i16 %x) {
+; CHECK-LABEL: splat_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i16> poison, i16 %x, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i16> %splat
+}
+
+define <vscale x 4 x i16> @splat_nxv4i16(i16 %x) {
+; CHECK-LABEL: splat_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i16> poison, i16 %x, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %ins, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i16> %splat
+}
+
+define <vscale x 8 x i16> @splat_nxv8i16(i16 %x) {
+; CHECK-LABEL: splat_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i16> poison, i16 %x, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %ins, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i16> %splat
+}
+
+define <vscale x 16 x i16> @splat_nxv16i16(i16 %x) {
+; CHECK-LABEL: splat_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i16> poison, i16 %x, i32 0
+  %splat = shufflevector <vscale x 16 x i16> %ins, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i16> %splat
+}
+
+define <vscale x 32 x i16> @splat_nxv32i16(i16 %x) {
+; CHECK-LABEL: splat_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 32 x i16> poison, i16 %x, i32 0
+  %splat = shufflevector <vscale x 32 x i16> %ins, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+  ret <vscale x 32 x i16> %splat
+}
+
+define <vscale x 1 x i32> @splat_nxv1i32(i32 %x) {
+; CHECK-LABEL: splat_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %ins, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i32> %splat
+}
+
+define <vscale x 2 x i32> @splat_nxv2i32(i32 %x) {
+; CHECK-LABEL: splat_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i32> %splat
+}
+
+define <vscale x 4 x i32> @splat_nxv4i32(i32 %x) {
+; CHECK-LABEL: splat_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i32> %splat
+}
+
+define <vscale x 8 x i32> @splat_nxv8i32(i32 %x) {
+; CHECK-LABEL: splat_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %ins, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i32> %splat
+}
+
+define <vscale x 16 x i32> @splat_nxv16i32(i32 %x) {
+; CHECK-LABEL: splat_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 16 x i32> poison, i32 %x, i32 0
+  %splat = shufflevector <vscale x 16 x i32> %ins, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i32> %splat
+}
+
+define <vscale x 1 x i64> @splat_nxv1i64(i64 %x) {
+; RV32-LABEL: splat_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 1 x i64> poison, i64 %x, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %ins, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i64> %splat
+}
+
+define <vscale x 2 x i64> @splat_nxv2i64(i64 %x) {
+; RV32-LABEL: splat_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i64> %splat
+}
+
+define <vscale x 4 x i64> @splat_nxv4i64(i64 %x) {
+; RV32-LABEL: splat_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 4 x i64> poison, i64 %x, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %ins, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i64> %splat
+}
+
+define <vscale x 8 x i64> @splat_nxv8i64(i64 %x) {
+; RV32-LABEL: splat_nxv8i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    fld fa5, 8(sp)
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa5
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv8i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 8 x i64> poison, i64 %x, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %ins, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i64> %splat
+}
+
+define <vscale x 1 x float> @splat_nxv1f32(float %x) {
+; RV32-LABEL: splat_nxv1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv1f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.x.w a0, fa0
+; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 1 x float> poison, float %x, i32 0
+  %splat = shufflevector <vscale x 1 x float> %ins, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x float> %splat
+}
+
+define <vscale x 2 x float> @splat_nxv2f32(float %x) {
+; RV32-LABEL: splat_nxv2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.x.w a0, fa0
+; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %splat = shufflevector <vscale x 2 x float> %ins, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x float> %splat
+}
+
+define <vscale x 4 x float> @splat_nxv4f32(float %x) {
+; RV32-LABEL: splat_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv4f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.x.w a0, fa0
+; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 4 x float> poison, float %x, i32 0
+  %splat = shufflevector <vscale x 4 x float> %ins, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x float> %splat
+}
+
+define <vscale x 8 x float> @splat_nxv8f32(float %x) {
+; RV32-LABEL: splat_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv8f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.x.w a0, fa0
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 8 x float> poison, float %x, i32 0
+  %splat = shufflevector <vscale x 8 x float> %ins, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x float> %splat
+}
+
+define <vscale x 16 x float> @splat_nxv16f32(float %x) {
+; RV32-LABEL: splat_nxv16f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfmv.v.f v8, fa0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: splat_nxv16f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.x.w a0, fa0
+; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    ret
+  %ins = insertelement <vscale x 16 x float> poison, float %x, i32 0
+  %splat = shufflevector <vscale x 16 x float> %ins, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x float> %splat
+}
+
+define <vscale x 1 x double> @splat_nxv1f64(double %x) {
+; CHECK-LABEL: splat_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 1 x double> poison, double %x, i32 0
+  %splat = shufflevector <vscale x 1 x double> %ins, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x double> %splat
+}
+
+define <vscale x 2 x double> @splat_nxv2f64(double %x) {
+; CHECK-LABEL: splat_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 2 x double> poison, double %x, i32 0
+  %splat = shufflevector <vscale x 2 x double> %ins, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x double> %splat
+}
+
+define <vscale x 4 x double> @splat_nxv4f64(double %x) {
+; CHECK-LABEL: splat_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 4 x double> poison, double %x, i32 0
+  %splat = shufflevector <vscale x 4 x double> %ins, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x double> %splat
+}
 
+define <vscale x 8 x double> @splat_nxv8f64(double %x) {
+; CHECK-LABEL: splat_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    ret
+  %ins = insertelement <vscale x 8 x double> poison, double %x, i32 0
+  %splat = shufflevector <vscale x 8 x double> %ins, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x double> %splat
+}

>From 1df86ec4b8b839602efff40364ea2ae48b969779 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 7 Oct 2024 07:08:19 -0700
Subject: [PATCH 6/6] fixup! use earlySelect

---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  | 42 +++++++++++++------
 1 file changed, 29 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index de359384e4debe..246c98942c42f2 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -68,6 +68,9 @@ class RISCVInstructionSelector : public InstructionSelector {
   // Returns true if the instruction was modified.
   void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
 
+  // An early selection function that runs before the selectImpl() call.
+  bool earlySelect(MachineInstr &I);
+
   bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
 
   // Custom selection methods
@@ -530,7 +533,6 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
   MachineFunction &MF = *MBB.getParent();
   MachineIRBuilder MIB(MI);
 
-  bool OpcWasGSplatVector = MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR;
   preISelLower(MI, MIB);
   const unsigned Opc = MI.getOpcode();
 
@@ -569,17 +571,8 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return true;
   }
 
-  // FIXME: We create a IMPLICIT_DEF and a G_CONSTANT in preISelLower when
-  // we encounter a G_SPLAT_VECTOR. We cannot select the G_CONSTANT until after
-  // the MI is lowered, since renderVLOp needs to see the G_CONSTANT. It would
-  // be nice if the InstructionSelector selected these instructions without
-  // needing to call select on them explicitly.
-  if (OpcWasGSplatVector) {
-    MachineInstr *Passthru = MRI->getVRegDef(MI.getOperand(1).getReg());
-    MachineInstr *VL = MRI->getVRegDef(MI.getOperand(3).getReg());
-    if (selectImpl(MI, *CoverageInfo))
-      return select(*Passthru) && select(*VL);
-  }
+  if (earlySelect(MI))
+    return true;
 
   if (selectImpl(MI, *CoverageInfo))
     return true;
@@ -815,6 +808,19 @@ void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
     MRI->setType(DstReg, sXLen);
     break;
   }
+
+  }
+}
+
+bool RISCVInstructionSelector::earlySelect(MachineInstr &MI) {
+  assert(MI.getParent() && "Instruction should be in a basic block!");
+  assert(MI.getParent()->getParent() && "Instruction should be in a function!");
+
+  MachineIRBuilder MIB(MI);
+
+  switch (MI.getOpcode()) {
+  default:
+    break;
   case TargetOpcode::G_SPLAT_VECTOR: {
     // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
     // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
@@ -835,13 +841,23 @@ void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
     auto VLMax = MIB.buildConstant(sXLen, -1);
     MRI->setRegBank(Passthru.getReg(0), RBI.getRegBank(RISCV::VRBRegBankID));
     MRI->setRegBank(VLMax.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
+
+    // We create an IMPLICIT_DEF and a G_CONSTANT when we encounter a
+    // G_SPLAT_VECTOR. We cannot select the G_CONSTANT until after the MI is
+    // lowered, since renderVLOp needs to see the G_CONSTANT.
+    // FIXME: It would be nice if the InstructionSelector selected these
+    // instructions without needing to call select on them explicitly,
+    // which would allow us to lower G_SPLAT_VECTOR in preISelLower and
+    // rely on select to do the selection instead of early selecting here.
     MachineInstrBuilder(*MI.getMF(), &MI)
         .addUse(Passthru.getReg(0))
         .addUse(Scalar)
         .addUse(VLMax.getReg(0));
-    break;
+    if (selectImpl(MI, *CoverageInfo))
+      return select(*Passthru) && select(*VLMax);
   }
   }
+  return false;
 }
 
 void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
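
For readers skimming the earlySelect hunk above, here is a rough MIR sketch
(not part of the patch) of what the G_SPLAT_VECTOR lowering builds for an
integer splat on RV64 before selectImpl runs. The vector type, register names,
and scalar type are illustrative assumptions; only the opcodes, the VLMax
constant of -1, and the operand order (passthru, scalar, VL) come from the
code in the hunk:

  %passthru:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
  %vlmax:gprb(s64) = G_CONSTANT i64 -1
  %splat:vrb(<vscale x 2 x s32>) = G_VMV_V_X_VL %passthru, %scalar, %vlmax

Since renderVLOp has to see the VL operand while it is still a G_CONSTANT,
earlySelect selects the passthru and VL definitions explicitly once selectImpl
has matched the splat, rather than leaving them to a later select call.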


