[llvm] ca5b81b - [RISCV][GISEL] regbankselect for G_SPLAT_VECTOR (#110744)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Oct 1 18:50:25 PDT 2024


Author: Michael Maitland
Date: 2024-10-01T21:50:21-04:00
New Revision: ca5b81bdefa19fc778bee4d2c347b6c3077038b5

URL: https://github.com/llvm/llvm-project/commit/ca5b81bdefa19fc778bee4d2c347b6c3077038b5
DIFF: https://github.com/llvm/llvm-project/commit/ca5b81bdefa19fc778bee4d2c347b6c3077038b5.diff

LOG: [RISCV][GISEL] regbankselect for G_SPLAT_VECTOR (#110744)

Added: 
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv32.mir
    llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv64.mir

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 5369be24f0e7cb..aa06cb4d5327c9 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -478,6 +478,21 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     }
     break;
   }
+  case TargetOpcode::G_SPLAT_VECTOR: {
+    OpdsMapping[0] = getVRBValueMapping(MRI.getType(MI.getOperand(0).getReg())
+                                            .getSizeInBits()
+                                            .getKnownMinValue());
+
+    LLT ScalarTy = MRI.getType(MI.getOperand(1).getReg());
+    MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
+    if ((GPRSize == 32 && ScalarTy.getSizeInBits() == 64) ||
+        onlyDefinesFP(*DefMI, MRI, TRI)) {
+      assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
+      OpdsMapping[1] = getFPValueMapping(ScalarTy.getSizeInBits());
+    } else
+      OpdsMapping[1] = GPRValueMapping;
+    break;
+  }
   default:
     // By default map all scalars to GPR.
     for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
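
A minimal standalone sketch of the bank-selection rule in the hunk above;
Bank and chooseScalarBank are illustrative names, not part of
RISCVRegisterBankInfo. The vector result always maps to the vector bank,
while the scalar operand goes to the FP bank when it cannot fit in a single
GPR or when its defining instruction only produces FP values:

#include <cassert>

enum class Bank { GPR, FPR };

// On RV32 (GPRSize == 32) an s64 scalar cannot live in one GPR, so it must
// be materialized in the FP bank (hence the hasStdExtD() assert in the
// hunk). A scalar whose def only writes FP values also stays in FPR.
static Bank chooseScalarBank(unsigned GPRSize, unsigned ScalarSize,
                             bool DefOnlyWritesFP) {
  if ((GPRSize == 32 && ScalarSize == 64) || DefOnlyWritesFP)
    return Bank::FPR;
  return Bank::GPR;
}

int main() {
  // RV64: a 64-bit integer splat scalar fits in one GPR
  // (splat_zero_nxv*i64 in splatvector-rv64.mir below).
  assert(chooseScalarBank(64, 64, false) == Bank::GPR);
  // RV32: a 64-bit integer splat scalar takes the FP path
  // (splat_zero_nxv*i64 in splatvector-rv32.mir).
  assert(chooseScalarBank(32, 64, false) == Bank::FPR);
  // FP-defined scalars stay in FPR on either XLEN, e.g. G_FCONSTANT feeding
  // the splat directly (f32 splats on rv32, f64 splats on both). On rv64,
  // f32 splats route through G_ANYEXT first, so the splat operand's def is
  // the ANYEXT, not an FP-only instruction, and it maps to GPR instead.
  assert(chooseScalarBank(32, 32, true) == Bank::FPR);
  assert(chooseScalarBank(64, 64, true) == Bank::FPR);
  return 0;
}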

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv32.mir
new file mode 100644
index 00000000000000..b44bece8eadf84
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv32.mir
@@ -0,0 +1,549 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck %s
+
+---
+name:            splat_zero_nxv1i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv16i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv32i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8m4 = COPY %0(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv64i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv64i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR %3(s32)
+    $v8m8 = COPY %0(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv16i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv32i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR %3(s32)
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s32) = G_CONSTANT i32 0
+    %0:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:fprb(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[MV]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %2:_(s32) = G_CONSTANT i32 0
+    %3:_(s32) = G_CONSTANT i32 0
+    %1:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
+    %0:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+
+---
+name:            splat_zero_nxv1f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %0:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %0:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %0:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %0:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv64.mir
new file mode 100644
index 00000000000000..efd52372534498
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/splatvector-rv64.mir
@@ -0,0 +1,583 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck %s
+
+---
+name:            splat_zero_nxv1i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv16i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv32i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv64i8
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv64i8
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv8i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv16i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv32i16
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv32i16
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %3:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %3(s32)
+    %0:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16i32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16i32
+    ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s32) = G_CONSTANT i32 0
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8i64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8i64
+    ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s64) = G_CONSTANT i64 0
+    %0:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[COPY]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[COPY]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8 = COPY %0(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv4f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[COPY]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m2 = COPY %0(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv8f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[COPY]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m4 = COPY %0(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv16f32
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv16f32
+    ; CHECK: [[C:%[0-9]+]]:fprb(s32) = G_FCONSTANT float 0.000000e+00
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprb(s32) = COPY [[C]](s32)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[COPY]](s32)
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s32) = G_FCONSTANT float 0.000000e+00
+    %2:_(s64) = G_ANYEXT %1(s32)
+    %0:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %2(s64)
+    $v8m8 = COPY %0(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            splat_zero_nxv1f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv1f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8 = COPY %0(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            splat_zero_nxv2f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv2f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m2 = COPY %0(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            splat_zero_nxv4f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv4f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m4 = COPY %0(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            splat_zero_nxv8f64
+legalized:       true
+regBankSelected: false
+body:             |
+  bb.1:
+    ; CHECK-LABEL: name: splat_zero_nxv8f64
+    ; CHECK: [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+    ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %1:_(s64) = G_FCONSTANT double 0.000000e+00
+    %0:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+    $v8m8 = COPY %0(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...

More information about the llvm-commits mailing list