[llvm] [GISEL] Add IRTranslation for shufflevector on scalable vector types (PR #80378)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 7 15:09:21 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/80378
>From 0a3108a90a1a1603fc6977448e89619a695d447f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 30 Jan 2024 07:21:27 -0800
Subject: [PATCH 1/6] [RISCV][GISEL] Add IRTranslation for shufflevector on
scalable vector types
---
llvm/lib/CodeGen/MachineVerifier.cpp | 32 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 +-
.../GlobalISel/irtranslator/shufflevector.ll | 1524 +++++++++++++++++
3 files changed, 1550 insertions(+), 12 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index fe865ed6f8a0e..ef1cefc4872ac 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1618,20 +1618,34 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
// Don't check that all operands are vector because scalars are used in
// place of 1 element vectors.
- int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
- int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
+ ElementCount SrcNumElts = Src0Ty.isVector() ? Src0Ty.getElementCount()
+ : ElementCount::getFixed(1);
+ ElementCount DstNumElts =
+ DstTy.isVector() ? DstTy.getElementCount() : ElementCount::getFixed(1);
ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
- if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
+ // For scalable vectors, there is an entry in the Mask for each
+ // KnownMinValue.
+ if (MaskIdxes.size() != DstNumElts.getKnownMinValue())
report("Wrong result type for shufflemask", MI);
- for (int Idx : MaskIdxes) {
- if (Idx < 0)
- continue;
-
- if (Idx >= 2 * SrcNumElts)
- report("Out of bounds shuffle index", MI);
+ if (Src0Ty.isScalableVector()) {
+ if (!llvm::all_of(MaskIdxes,
+ [&MaskIdxes](int M) { return M == MaskIdxes[0]; }))
+ report("Elements of a scalable G_SHUFFLE_VECTOR mask must match", MI);
+ if (MaskIdxes[0] != 0 && MaskIdxes[0] != -1)
+        report("Elements of a scalable G_SHUFFLE_VECTOR mask must be zero or undef",
+ MI);
+ } else {
+ // Idxes for fixed vectors must be in bounds or undef, which is
+ // represented as -1.
+ for (int Idx : MaskIdxes) {
+ if (Idx < 0)
+ continue;
+ if ((unsigned)Idx >= 2 * SrcNumElts.getFixedValue())
+ report("Out of bounds shuffle index", MI);
+ }
}
break;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 27037f4d5c5c8..cbcb826ba4d87 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20608,11 +20608,11 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
- // GISel support is in progress or complete for G_ADD, G_SUB, G_AND, G_OR, and
- // G_XOR.
+ // GISel support is in progress or complete for these opcodes.
unsigned Op = Inst.getOpcode();
if (Op == Instruction::Add || Op == Instruction::Sub ||
- Op == Instruction::And || Op == Instruction::Or || Op == Instruction::Xor)
+ Op == Instruction::And || Op == Instruction::Or ||
+ Op == Instruction::Xor || Op == Instruction::ShuffleVector)
return false;
if (Inst.getType()->isScalableTy())
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
new file mode 100644
index 0000000000000..741f791ff3d05
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
@@ -0,0 +1,1524 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i1> @shufflevector_nxv1i1_0() {
+ ; RV32-LABEL: name: shufflevector_nxv1i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 1 x i1> poison, <vscale x 1 x i1> poison, <vscale x 1 x i32> poison
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @shufflevector_nxv1i1_1() {
+ ; RV32-LABEL: name: shufflevector_nxv1i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 1 x i1> undef, <vscale x 1 x i1> undef, <vscale x 1 x i32> undef
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @shufflevector_nxv1i1_2(<vscale x 1 x i1> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv1i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s1>), [[DEF]], shufflemask(0)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s1>), [[DEF]], shufflemask(0)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %b = shufflevector <vscale x 1 x i1> %a , <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ ret <vscale x 1 x i1> %b
+}
+
+define <vscale x 2 x i1> @shufflevector_nxv2i1_0() {
+ ; RV32-LABEL: name: shufflevector_nxv2i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 2 x i1> poison, <vscale x 2 x i1> poison, <vscale x 2 x i32> poison
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @shufflevector_nxv2i1_1() {
+ ; RV32-LABEL: name: shufflevector_nxv2i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> undef
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @shufflevector_nxv2i1_2(<vscale x 2 x i1> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv2i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s1>), [[DEF]], shufflemask(0, 0)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s1>), [[DEF]], shufflemask(0, 0)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %b = shufflevector <vscale x 2 x i1> %a , <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x i1> %b
+}
+
+define <vscale x 4 x i1> @shufflevector_nxv4i1_0() {
+ ; RV32-LABEL: name: shufflevector_nxv4i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 4 x i1> poison, <vscale x 4 x i1> poison, <vscale x 4 x i32> poison
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @shufflevector_nxv4i1_1() {
+ ; RV32-LABEL: name: shufflevector_nxv4i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> undef
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @shufflevector_nxv4i1_2(<vscale x 4 x i1> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv4i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s1>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s1>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %b = shufflevector <vscale x 4 x i1> %a , <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x i1> %b
+}
+
+define <vscale x 8 x i1> @shufflevector_nxv8i1_0() {
+ ; RV32-LABEL: name: shufflevector_nxv8i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 8 x i1> poison, <vscale x 8 x i1> poison, <vscale x 8 x i32> poison
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @shufflevector_nxv8i1_1() {
+ ; RV32-LABEL: name: shufflevector_nxv8i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> undef
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @shufflevector_nxv8i1_2(<vscale x 8 x i1> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv8i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %b = shufflevector <vscale x 8 x i1> %a , <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x i1> %b
+}
+
+define <vscale x 16 x i1> @shufflevector_nxv16i1_0() {
+ ; RV32-LABEL: name: shufflevector_nxv16i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 16 x i1> poison, <vscale x 16 x i1> poison, <vscale x 16 x i32> poison
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @shufflevector_nxv16i1_1() {
+ ; RV32-LABEL: name: shufflevector_nxv16i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> undef
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @shufflevector_nxv16i1_2(<vscale x 16 x i1> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv16i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v0
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v0
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %b = shufflevector <vscale x 16 x i1> %a , <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ ret <vscale x 16 x i1> %b
+}
+
+define <vscale x 1 x i8> @shufflevector_nxv1i8_0() {
+ ; RV32-LABEL: name: shufflevector_nxv1i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i32> poison
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @shufflevector_nxv1i8_1() {
+ ; RV32-LABEL: name: shufflevector_nxv1i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i32> undef
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @shufflevector_nxv1i8_2(<vscale x 1 x i8> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv1i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s8>), [[DEF]], shufflemask(0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s8>), [[DEF]], shufflemask(0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 1 x i8> %a , <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ ret <vscale x 1 x i8> %b
+}
+
+define <vscale x 2 x i8> @shufflevector_nxv2i8_0() {
+ ; RV32-LABEL: name: shufflevector_nxv2i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i32> poison
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @shufflevector_nxv2i8_1() {
+ ; RV32-LABEL: name: shufflevector_nxv2i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i32> undef
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @shufflevector_nxv2i8_2(<vscale x 2 x i8> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv2i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s8>), [[DEF]], shufflemask(0, 0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s8>), [[DEF]], shufflemask(0, 0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 2 x i8> %a , <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x i8> %b
+}
+
+define <vscale x 4 x i8> @shufflevector_nxv4i8_0() {
+ ; RV32-LABEL: name: shufflevector_nxv4i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i32> poison
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @shufflevector_nxv4i8_1() {
+ ; RV32-LABEL: name: shufflevector_nxv4i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i32> undef
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @shufflevector_nxv4i8_2(<vscale x 4 x i8> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv4i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s8>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s8>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 4 x i8> %a , <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x i8> %b
+}
+
+define <vscale x 8 x i8> @shufflevector_nxv8i8_0() {
+ ; RV32-LABEL: name: shufflevector_nxv8i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i32> poison
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @shufflevector_nxv8i8_1() {
+ ; RV32-LABEL: name: shufflevector_nxv8i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i32> undef
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @shufflevector_nxv8i8_2(<vscale x 8 x i8> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv8i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 8 x i8> %a , <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x i8> %b
+}
+
+define <vscale x 16 x i8> @shufflevector_nxv16i8_0() {
+ ; RV32-LABEL: name: shufflevector_nxv16i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i32> poison
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @shufflevector_nxv16i8_1() {
+ ; RV32-LABEL: name: shufflevector_nxv16i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i32> undef
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @shufflevector_nxv16i8_2(<vscale x 16 x i8> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv16i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m2
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m2
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %b = shufflevector <vscale x 16 x i8> %a , <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ ret <vscale x 16 x i8> %b
+}
+
+define <vscale x 1 x i16> @shufflevector_nxv1i16_0() {
+ ; RV32-LABEL: name: shufflevector_nxv1i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i32> poison
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @shufflevector_nxv1i16_1() {
+ ; RV32-LABEL: name: shufflevector_nxv1i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i32> undef
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @shufflevector_nxv1i16_2(<vscale x 1 x i16> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv1i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s16>), [[DEF]], shufflemask(0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s16>), [[DEF]], shufflemask(0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 1 x i16> %a , <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ ret <vscale x 1 x i16> %b
+}
+
+define <vscale x 2 x i16> @shufflevector_nxv2i16_0() {
+ ; RV32-LABEL: name: shufflevector_nxv2i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i32> poison
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @shufflevector_nxv2i16_1() {
+ ; RV32-LABEL: name: shufflevector_nxv2i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i32> undef
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @shufflevector_nxv2i16_2(<vscale x 2 x i16> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv2i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s16>), [[DEF]], shufflemask(0, 0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s16>), [[DEF]], shufflemask(0, 0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 2 x i16> %a , <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x i16> %b
+}
+
+define <vscale x 4 x i16> @shufflevector_nxv4i16_0() {
+ ; RV32-LABEL: name: shufflevector_nxv4i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i32> poison
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @shufflevector_nxv4i16_1() {
+ ; RV32-LABEL: name: shufflevector_nxv4i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i32> undef
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @shufflevector_nxv4i16_2(<vscale x 4 x i16> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv4i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s16>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s16>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 4 x i16> %a , <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x i16> %b
+}
+
+define <vscale x 8 x i16> @shufflevector_nxv8i16_0() {
+ ; RV32-LABEL: name: shufflevector_nxv8i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i32> poison
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @shufflevector_nxv8i16_1() {
+ ; RV32-LABEL: name: shufflevector_nxv8i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i32> undef
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @shufflevector_nxv8i16_2(<vscale x 8 x i16> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv8i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m2
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m2
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %b = shufflevector <vscale x 8 x i16> %a , <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x i16> %b
+}
+
+define <vscale x 16 x i16> @shufflevector_nxv16i16_0() {
+ ; RV32-LABEL: name: shufflevector_nxv16i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = shufflevector <vscale x 16 x i16> poison, <vscale x 16 x i16> poison, <vscale x 16 x i32> poison
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @shufflevector_nxv16i16_1() {
+ ; RV32-LABEL: name: shufflevector_nxv16i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = shufflevector <vscale x 16 x i16> undef, <vscale x 16 x i16> undef, <vscale x 16 x i32> undef
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @shufflevector_nxv16i16_2(<vscale x 16 x i16> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv16i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m4
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m4
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %b = shufflevector <vscale x 16 x i16> %a , <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ ret <vscale x 16 x i16> %b
+}
+
+define <vscale x 1 x i32> @shufflevector_nxv1i32_0() {
+ ; RV32-LABEL: name: shufflevector_nxv1i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @shufflevector_nxv1i32_1() {
+ ; RV32-LABEL: name: shufflevector_nxv1i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @shufflevector_nxv1i32_2(<vscale x 1 x i32> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv1i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s32>), [[DEF]], shufflemask(0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s32>), [[DEF]], shufflemask(0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 1 x i32> %a , <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ ret <vscale x 1 x i32> %b
+}
+
+define <vscale x 2 x i32> @shufflevector_nxv2i32_0() {
+ ; RV32-LABEL: name: shufflevector_nxv2i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @shufflevector_nxv2i32_1() {
+ ; RV32-LABEL: name: shufflevector_nxv2i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @shufflevector_nxv2i32_2(<vscale x 2 x i32> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv2i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s32>), [[DEF]], shufflemask(0, 0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s32>), [[DEF]], shufflemask(0, 0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 2 x i32> %a , <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x i32> %b
+}
+
+define <vscale x 4 x i32> @shufflevector_nxv4i32_0() {
+ ; RV32-LABEL: name: shufflevector_nxv4i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @shufflevector_nxv4i32_1() {
+ ; RV32-LABEL: name: shufflevector_nxv4i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @shufflevector_nxv4i32_2(<vscale x 4 x i32> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv4i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m2
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m2
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %b = shufflevector <vscale x 4 x i32> %a , <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x i32> %b
+}
+
+define <vscale x 8 x i32> @shufflevector_nxv8i32_0() {
+ ; RV32-LABEL: name: shufflevector_nxv8i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = shufflevector <vscale x 8 x i32> poison, <vscale x 8 x i32> poison, <vscale x 8 x i32> poison
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @shufflevector_nxv8i32_1() {
+ ; RV32-LABEL: name: shufflevector_nxv8i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = shufflevector <vscale x 8 x i32> undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @shufflevector_nxv8i32_2(<vscale x 8 x i32> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv8i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m4
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m4
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %b = shufflevector <vscale x 8 x i32> %a , <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x i32> %b
+}
+
+define <vscale x 16 x i32> @shufflevector_nxv16i32_0() {
+ ; RV32-LABEL: name: shufflevector_nxv16i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = shufflevector <vscale x 16 x i32> poison, <vscale x 16 x i32> poison, <vscale x 16 x i32> poison
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @shufflevector_nxv16i32_1() {
+ ; RV32-LABEL: name: shufflevector_nxv16i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = shufflevector <vscale x 16 x i32> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @shufflevector_nxv16i32_2(<vscale x 16 x i32> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv16i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %b = shufflevector <vscale x 16 x i32> %a , <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ ret <vscale x 16 x i32> %b
+}
+
+define <vscale x 1 x i64> @shufflevector_nxv1i64_0() {
+ ; RV32-LABEL: name: shufflevector_nxv1i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i32> poison
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @shufflevector_nxv1i64_1() {
+ ; RV32-LABEL: name: shufflevector_nxv1i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = shufflevector <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i32> undef
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @shufflevector_nxv1i64_2(<vscale x 1 x i64> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv1i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s64>), [[DEF]], shufflemask(0)
+ ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv1i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s64>), [[DEF]], shufflemask(0)
+ ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %b = shufflevector <vscale x 1 x i64> %a , <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ ret <vscale x 1 x i64> %b
+}
+
+define <vscale x 2 x i64> @shufflevector_nxv2i64_0() {
+ ; RV32-LABEL: name: shufflevector_nxv2i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i32> poison
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @shufflevector_nxv2i64_1() {
+ ; RV32-LABEL: name: shufflevector_nxv2i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = shufflevector <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i32> undef
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @shufflevector_nxv2i64_2(<vscale x 2 x i64> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv2i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m2
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s64>), [[DEF]], shufflemask(0, 0)
+ ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv2i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m2
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s64>), [[DEF]], shufflemask(0, 0)
+ ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %b = shufflevector <vscale x 2 x i64> %a , <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ ret <vscale x 2 x i64> %b
+}
+
+define <vscale x 4 x i64> @shufflevector_nxv4i64_0() {
+ ; RV32-LABEL: name: shufflevector_nxv4i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = shufflevector <vscale x 4 x i64> poison, <vscale x 4 x i64> poison, <vscale x 4 x i32> poison
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @shufflevector_nxv4i64_1() {
+ ; RV32-LABEL: name: shufflevector_nxv4i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = shufflevector <vscale x 4 x i64> undef, <vscale x 4 x i64> undef, <vscale x 4 x i32> undef
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @shufflevector_nxv4i64_2(<vscale x 4 x i64> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv4i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m4
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s64>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv4i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m4
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s64>), [[DEF]], shufflemask(0, 0, 0, 0)
+ ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %b = shufflevector <vscale x 4 x i64> %a , <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x i64> %b
+}
+
+define <vscale x 8 x i64> @shufflevector_nxv8i64_0() {
+ ; RV32-LABEL: name: shufflevector_nxv8i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = shufflevector <vscale x 8 x i64> poison, <vscale x 8 x i64> poison, <vscale x 8 x i32> poison
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @shufflevector_nxv8i64_1() {
+ ; RV32-LABEL: name: shufflevector_nxv8i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = shufflevector <vscale x 8 x i64> undef, <vscale x 8 x i64> undef, <vscale x 8 x i32> undef
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @shufflevector_nxv8i64_2(<vscale x 8 x i64> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv8i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv8i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %b = shufflevector <vscale x 8 x i64> %a , <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ ret <vscale x 8 x i64> %b
+}
+
+define <vscale x 16 x i64> @shufflevector_nxv16i64_0() {
+ ; RV32-LABEL: name: shufflevector_nxv16i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = shufflevector <vscale x 16 x i64> poison, <vscale x 16 x i64> poison, <vscale x 16 x i32> poison
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 16 x i64> @shufflevector_nxv16i64_1() {
+ ; RV32-LABEL: name: shufflevector_nxv16i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = shufflevector <vscale x 16 x i64> undef, <vscale x 16 x i64> undef, <vscale x 16 x i32> undef
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 16 x i64> @shufflevector_nxv16i64_2(<vscale x 16 x i64> %a) {
+ ; RV32-LABEL: name: shufflevector_nxv16i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8m8, $v16m8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+ ; RV32-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s64>), [[COPY1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<vscale x 16 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: shufflevector_nxv16i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8m8, $v16m8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
+ ; RV64-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s64>), [[COPY1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<vscale x 16 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %b = shufflevector <vscale x 16 x i64> %a , <vscale x 16 x i64> poison, <vscale x 16 x i32> zeroinitializer
+ ret <vscale x 16 x i64> %b
+}
+
+
+
>From 18fac750080cecae65943e8c3118153b936cfca4 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 5 Feb 2024 07:40:20 -0800
Subject: [PATCH 2/6] fixme! add machineverifier tests for new report cases
---
llvm/lib/CodeGen/MachineVerifier.cpp | 3 ++-
llvm/test/MachineVerifier/test_g_shuffle_vector.mir | 11 +++++++++++
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index ef1cefc4872ac..8ccd4192a0014 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1635,7 +1635,8 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
[&MaskIdxes](int M) { return M == MaskIdxes[0]; }))
report("Elements of a scalable G_SHUFFLE_VECTOR mask must match", MI);
if (MaskIdxes[0] != 0 && MaskIdxes[0] != -1)
- report("Elements of a scalable G_SHUFFLE_VECTOR mask be zero or undef",
+ report("Elements of a scalable G_SHUFFLE_VECTOR mask must be zero or "
+ "undef",
MI);
} else {
// Idxes for fixed vectors must be in bounds or undef, which is
diff --git a/llvm/test/MachineVerifier/test_g_shuffle_vector.mir b/llvm/test/MachineVerifier/test_g_shuffle_vector.mir
index 6aba6731c9fee..41f46d0c184ee 100644
--- a/llvm/test/MachineVerifier/test_g_shuffle_vector.mir
+++ b/llvm/test/MachineVerifier/test_g_shuffle_vector.mir
@@ -56,4 +56,15 @@ body: |
%22:_(s32) = G_IMPLICIT_DEF
%20:_(<2 x s32>) = G_SHUFFLE_VECTOR %22, %22, shufflemask(0, 2)
+ %23:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %24:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+
+ ; CHECK: Bad machine code: Wrong result type for shufflemask
+ %25:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR %23, %23, shufflemask(0, 0)
+
+ ; CHECK: Elements of a scalable G_SHUFFLE_VECTOR mask must match
+ %26:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR %24, %24, shufflemask(0, -1)
+
+ ; CHECK: Bad machine code: Elements of a scalable G_SHUFFLE_VECTOR mask must be zero or undef
+ %27:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR %24, %24, shufflemask(1, 1)
...
>From 37f8d7c5ad88dfbffaa78d933fcd4100edf08345 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 7 Feb 2024 07:25:41 -0800
Subject: [PATCH 3/6] fixup! select shufflevector as G_SPLAT_VECTOR
---
.../CodeGen/GlobalISel/MachineIRBuilder.h | 14 +-
llvm/include/llvm/Support/TargetOpcodes.def | 3 +
llvm/include/llvm/Target/GenericOpcodes.td | 7 +
llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp | 6 +-
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 23 +-
.../CodeGen/GlobalISel/LegalizerHelper.cpp | 2 +-
.../CodeGen/GlobalISel/MachineIRBuilder.cpp | 17 +-
.../GlobalISel/irtranslator/shufflevector.ll | 950 +++++++++++-------
8 files changed, 658 insertions(+), 364 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 1387a0a37561c..634403b069aa8 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -1063,8 +1063,8 @@ class MachineIRBuilder {
/// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
/// the number of elements
- MachineInstrBuilder buildSplatVector(const DstOp &Res,
- const SrcOp &Src);
+ MachineInstrBuilder buildBuildVectorSplatVector(const DstOp &Res,
+ const SrcOp &Src);
/// Build and insert \p Res = G_BUILD_VECTOR_TRUNC \p Op0, ...
///
@@ -1099,6 +1099,16 @@ class MachineIRBuilder {
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
const SrcOp &Src2, ArrayRef<int> Mask);
+ /// Build and insert \p Res = G_SPLAT_VECTOR \p Val
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Res must be a generic virtual register with vector type.
+ /// \pre \p Val must be a generic virtual register with scalar type.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ MachineInstrBuilder buildSplatVector(const DstOp &Res,
+ const SrcOp &Val);
+
/// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
///
/// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index abb237083d254..dc30476f4fdc4 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -728,6 +728,9 @@ HANDLE_TARGET_OPCODE(G_EXTRACT_VECTOR_ELT)
/// Generic shufflevector.
HANDLE_TARGET_OPCODE(G_SHUFFLE_VECTOR)
+/// Generic splatvector.
+HANDLE_TARGET_OPCODE(G_SPLAT_VECTOR)
+
/// Generic count trailing zeroes.
HANDLE_TARGET_OPCODE(G_CTTZ)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 2c73b67f9e1af..a59ff5dd7eec4 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -1444,6 +1444,13 @@ def G_SHUFFLE_VECTOR: GenericInstruction {
let hasSideEffects = false;
}
+// Generic splatvector.
+def G_SPLAT_VECTOR: GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$val);
+ let hasSideEffects = false;
+}
+
//------------------------------------------------------------------------------
// Vector reductions
//------------------------------------------------------------------------------
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
index 64e2d517e3b9c..0289e753d5614 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -309,7 +309,8 @@ MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res,
// For vectors, CSE the element only for now.
LLT Ty = Res.getLLTTy(*getMRI());
if (Ty.isVector())
- return buildSplatVector(Res, buildConstant(Ty.getElementType(), Val));
+ return buildBuildVectorSplatVector(Res,
+ buildConstant(Ty.getElementType(), Val));
FoldingSetNodeID ID;
GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
@@ -336,7 +337,8 @@ MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res,
// For vectors, CSE the element only for now.
LLT Ty = Res.getLLTTy(*getMRI());
if (Ty.isVector())
- return buildSplatVector(Res, buildFConstant(Ty.getElementType(), Val));
+ return buildBuildVectorSplatVector(
+ Res, buildFConstant(Ty.getElementType(), Val));
FoldingSetNodeID ID;
GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index dd38317c26bff..ed2687f0a8e64 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1602,10 +1602,10 @@ bool IRTranslator::translateGetElementPtr(const User &U,
// We might need to splat the base pointer into a vector if the offsets
// are vectors.
if (WantSplatVector && !PtrTy.isVector()) {
- BaseReg =
- MIRBuilder
- .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
- .getReg(0);
+ BaseReg = MIRBuilder
+ .buildBuildVectorSplatVector(
+ LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
+ .getReg(0);
PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
PtrTy = getLLTForType(*PtrIRTy, *DL);
OffsetIRTy = DL->getIndexType(PtrIRTy);
@@ -1643,7 +1643,7 @@ bool IRTranslator::translateGetElementPtr(const User &U,
LLT IdxTy = MRI->getType(IdxReg);
if (IdxTy != OffsetTy) {
if (!IdxTy.isVector() && WantSplatVector) {
- IdxReg = MIRBuilder.buildSplatVector(
+ IdxReg = MIRBuilder.buildBuildVectorSplatVector(
OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
}
@@ -2998,6 +2998,19 @@ bool IRTranslator::translateExtractElement(const User &U,
bool IRTranslator::translateShuffleVector(const User &U,
MachineIRBuilder &MIRBuilder) {
+  // A ShuffleVector that operates on scalable vectors is a splat vector
+ // where the value of the splat vector is the 0th element of the first
+ // operand, since the index mask operand is the zeroinitializer (undef and
+ // poison are treated as zeroinitializer here).
+ if (U.getOperand(0)->getType()->isScalableTy()) {
+ Value *Op0 = U.getOperand(0);
+ auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
+ LLT::scalar(Op0->getType()->getScalarSizeInBits()),
+ getOrCreateVReg(*Op0), 0);
+ MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
+ return true;
+ }
+
ArrayRef<int> Mask;
if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
Mask = SVI->getShuffleMask();
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 464ff0864d146..fa1f52027411d 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -8244,7 +8244,7 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
// For vector types create a G_BUILD_VECTOR.
if (Ty.isVector())
- Val = MIB.buildSplatVector(Ty, Val).getReg(0);
+ Val = MIB.buildBuildVectorSplatVector(Ty, Val).getReg(0);
return Val;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index a5827c26c04f4..4f244bea33501 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -321,7 +321,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
auto Const = buildInstr(TargetOpcode::G_CONSTANT)
.addDef(getMRI()->createGenericVirtualRegister(EltTy))
.addCImm(&Val);
- return buildSplatVector(Res, Const);
+ return buildBuildVectorSplatVector(Res, Const);
}
auto Const = buildInstr(TargetOpcode::G_CONSTANT);
@@ -358,7 +358,7 @@ MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
.addDef(getMRI()->createGenericVirtualRegister(EltTy))
.addFPImm(&Val);
- return buildSplatVector(Res, Const);
+ return buildBuildVectorSplatVector(Res, Const);
}
auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
@@ -706,8 +706,9 @@ MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
-MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
- const SrcOp &Src) {
+MachineInstrBuilder
+MachineIRBuilder::buildBuildVectorSplatVector(const DstOp &Res,
+ const SrcOp &Src) {
SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
@@ -737,6 +738,14 @@ MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
+MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
+ const SrcOp &Src) {
+ LLT DstTy = Res.getLLTTy(*getMRI());
+ assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
+ "Expected Src to match Dst elt ty");
+ return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
+}
+
MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
const SrcOp &Src1,
const SrcOp &Src2,
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
index 741f791ff3d05..df7778899b0d0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
@@ -8,15 +8,19 @@ define <vscale x 1 x i1> @shufflevector_nxv1i1_0() {
; RV32-LABEL: name: shufflevector_nxv1i1_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv1i1_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 1 x i1> poison, <vscale x 1 x i1> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i1> %a
@@ -26,15 +30,19 @@ define <vscale x 1 x i1> @shufflevector_nxv1i1_1() {
; RV32-LABEL: name: shufflevector_nxv1i1_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv1i1_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s1>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 1 x i1> undef, <vscale x 1 x i1> undef, <vscale x 1 x i32> undef
ret <vscale x 1 x i1> %a
@@ -46,9 +54,10 @@ define <vscale x 1 x i1> @shufflevector_nxv1i1_2(<vscale x 1 x i1> %a) {
; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s1>), [[DEF]], shufflemask(0)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv1i1_2
@@ -56,9 +65,10 @@ define <vscale x 1 x i1> @shufflevector_nxv1i1_2(<vscale x 1 x i1> %a) {
; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s1>), [[DEF]], shufflemask(0)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%b = shufflevector <vscale x 1 x i1> %a , <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
ret <vscale x 1 x i1> %b
@@ -68,15 +78,19 @@ define <vscale x 2 x i1> @shufflevector_nxv2i1_0() {
; RV32-LABEL: name: shufflevector_nxv2i1_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv2i1_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 2 x i1> poison, <vscale x 2 x i1> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i1> %a
@@ -86,15 +100,19 @@ define <vscale x 2 x i1> @shufflevector_nxv2i1_1() {
; RV32-LABEL: name: shufflevector_nxv2i1_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv2i1_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s1>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> undef
ret <vscale x 2 x i1> %a
@@ -106,9 +124,10 @@ define <vscale x 2 x i1> @shufflevector_nxv2i1_2(<vscale x 2 x i1> %a) {
; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s1>), [[DEF]], shufflemask(0, 0)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv2i1_2
@@ -116,9 +135,10 @@ define <vscale x 2 x i1> @shufflevector_nxv2i1_2(<vscale x 2 x i1> %a) {
; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s1>), [[DEF]], shufflemask(0, 0)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%b = shufflevector <vscale x 2 x i1> %a , <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i1> %b
@@ -128,15 +148,19 @@ define <vscale x 4 x i1> @shufflevector_nxv4i1_0() {
; RV32-LABEL: name: shufflevector_nxv4i1_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv4i1_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 4 x i1> poison, <vscale x 4 x i1> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i1> %a
@@ -146,15 +170,19 @@ define <vscale x 4 x i1> @shufflevector_nxv4i1_1() {
; RV32-LABEL: name: shufflevector_nxv4i1_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv4i1_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> undef
ret <vscale x 4 x i1> %a
@@ -166,9 +194,10 @@ define <vscale x 4 x i1> @shufflevector_nxv4i1_2(<vscale x 4 x i1> %a) {
; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s1>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv4i1_2
@@ -176,9 +205,10 @@ define <vscale x 4 x i1> @shufflevector_nxv4i1_2(<vscale x 4 x i1> %a) {
; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s1>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%b = shufflevector <vscale x 4 x i1> %a , <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i1> %b
@@ -188,15 +218,19 @@ define <vscale x 8 x i1> @shufflevector_nxv8i1_0() {
; RV32-LABEL: name: shufflevector_nxv8i1_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv8i1_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 8 x i1> poison, <vscale x 8 x i1> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i1> %a
@@ -206,15 +240,19 @@ define <vscale x 8 x i1> @shufflevector_nxv8i1_1() {
; RV32-LABEL: name: shufflevector_nxv8i1_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv8i1_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> undef
ret <vscale x 8 x i1> %a
@@ -226,9 +264,10 @@ define <vscale x 8 x i1> @shufflevector_nxv8i1_2(<vscale x 8 x i1> %a) {
; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv8i1_2
@@ -236,9 +275,10 @@ define <vscale x 8 x i1> @shufflevector_nxv8i1_2(<vscale x 8 x i1> %a) {
; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%b = shufflevector <vscale x 8 x i1> %a , <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %b
@@ -248,15 +288,19 @@ define <vscale x 16 x i1> @shufflevector_nxv16i1_0() {
; RV32-LABEL: name: shufflevector_nxv16i1_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv16i1_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 16 x i1> poison, <vscale x 16 x i1> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i1> %a
@@ -266,15 +310,19 @@ define <vscale x 16 x i1> @shufflevector_nxv16i1_1() {
; RV32-LABEL: name: shufflevector_nxv16i1_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv16i1_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s1>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%a = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> undef
ret <vscale x 16 x i1> %a
@@ -286,9 +334,10 @@ define <vscale x 16 x i1> @shufflevector_nxv16i1_2(<vscale x 16 x i1> %a) {
; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s1>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: shufflevector_nxv16i1_2
@@ -296,9 +345,10 @@ define <vscale x 16 x i1> @shufflevector_nxv16i1_2(<vscale x 16 x i1> %a) {
; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s1>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v0 = COPY [[SHUF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s1) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s1>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
%b = shufflevector <vscale x 16 x i1> %a , <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i1> %b
@@ -308,15 +358,19 @@ define <vscale x 1 x i8> @shufflevector_nxv1i8_0() {
; RV32-LABEL: name: shufflevector_nxv1i8_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i8_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i8> %a
@@ -326,15 +380,19 @@ define <vscale x 1 x i8> @shufflevector_nxv1i8_1() {
; RV32-LABEL: name: shufflevector_nxv1i8_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i8_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s8>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i32> undef
ret <vscale x 1 x i8> %a
@@ -346,9 +404,10 @@ define <vscale x 1 x i8> @shufflevector_nxv1i8_2(<vscale x 1 x i8> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s8>), [[DEF]], shufflemask(0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i8_2
@@ -356,9 +415,10 @@ define <vscale x 1 x i8> @shufflevector_nxv1i8_2(<vscale x 1 x i8> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s8>), [[DEF]], shufflemask(0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 1 x i8> %a , <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
ret <vscale x 1 x i8> %b
@@ -368,15 +428,19 @@ define <vscale x 2 x i8> @shufflevector_nxv2i8_0() {
; RV32-LABEL: name: shufflevector_nxv2i8_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i8_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i8> %a
@@ -386,15 +450,19 @@ define <vscale x 2 x i8> @shufflevector_nxv2i8_1() {
; RV32-LABEL: name: shufflevector_nxv2i8_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i8_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s8>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i32> undef
ret <vscale x 2 x i8> %a
@@ -406,9 +474,10 @@ define <vscale x 2 x i8> @shufflevector_nxv2i8_2(<vscale x 2 x i8> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s8>), [[DEF]], shufflemask(0, 0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i8_2
@@ -416,9 +485,10 @@ define <vscale x 2 x i8> @shufflevector_nxv2i8_2(<vscale x 2 x i8> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s8>), [[DEF]], shufflemask(0, 0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 2 x i8> %a , <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i8> %b
@@ -428,15 +498,19 @@ define <vscale x 4 x i8> @shufflevector_nxv4i8_0() {
; RV32-LABEL: name: shufflevector_nxv4i8_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv4i8_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i8> %a
@@ -446,15 +520,19 @@ define <vscale x 4 x i8> @shufflevector_nxv4i8_1() {
; RV32-LABEL: name: shufflevector_nxv4i8_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv4i8_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i32> undef
ret <vscale x 4 x i8> %a
@@ -466,9 +544,10 @@ define <vscale x 4 x i8> @shufflevector_nxv4i8_2(<vscale x 4 x i8> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s8>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv4i8_2
@@ -476,9 +555,10 @@ define <vscale x 4 x i8> @shufflevector_nxv4i8_2(<vscale x 4 x i8> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s8>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 4 x i8> %a , <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i8> %b
@@ -488,15 +568,19 @@ define <vscale x 8 x i8> @shufflevector_nxv8i8_0() {
; RV32-LABEL: name: shufflevector_nxv8i8_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv8i8_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i8> %a
@@ -506,15 +590,19 @@ define <vscale x 8 x i8> @shufflevector_nxv8i8_1() {
; RV32-LABEL: name: shufflevector_nxv8i8_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv8i8_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i32> undef
ret <vscale x 8 x i8> %a
@@ -526,9 +614,10 @@ define <vscale x 8 x i8> @shufflevector_nxv8i8_2(<vscale x 8 x i8> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv8i8_2
@@ -536,9 +625,10 @@ define <vscale x 8 x i8> @shufflevector_nxv8i8_2(<vscale x 8 x i8> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 8 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 8 x i8> %a , <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i8> %b
@@ -548,15 +638,19 @@ define <vscale x 16 x i8> @shufflevector_nxv16i8_0() {
; RV32-LABEL: name: shufflevector_nxv16i8_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv16i8_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i8> %a
@@ -566,15 +660,19 @@ define <vscale x 16 x i8> @shufflevector_nxv16i8_1() {
; RV32-LABEL: name: shufflevector_nxv16i8_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv16i8_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s8>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i32> undef
ret <vscale x 16 x i8> %a
@@ -586,9 +684,10 @@ define <vscale x 16 x i8> @shufflevector_nxv16i8_2(<vscale x 16 x i8> %a) {
; RV32-NEXT: liveins: $v8m2
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s8>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv16i8_2
@@ -596,9 +695,10 @@ define <vscale x 16 x i8> @shufflevector_nxv16i8_2(<vscale x 16 x i8> %a) {
; RV64-NEXT: liveins: $v8m2
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s8>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 16 x s8>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s8) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s8>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
%b = shufflevector <vscale x 16 x i8> %a , <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i8> %b
@@ -608,15 +708,19 @@ define <vscale x 1 x i16> @shufflevector_nxv1i16_0() {
; RV32-LABEL: name: shufflevector_nxv1i16_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i16_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i16> %a
@@ -626,15 +730,19 @@ define <vscale x 1 x i16> @shufflevector_nxv1i16_1() {
; RV32-LABEL: name: shufflevector_nxv1i16_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i16_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s16>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i32> undef
ret <vscale x 1 x i16> %a
@@ -646,9 +754,10 @@ define <vscale x 1 x i16> @shufflevector_nxv1i16_2(<vscale x 1 x i16> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s16>), [[DEF]], shufflemask(0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i16_2
@@ -656,9 +765,10 @@ define <vscale x 1 x i16> @shufflevector_nxv1i16_2(<vscale x 1 x i16> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s16>), [[DEF]], shufflemask(0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 1 x i16> %a , <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
ret <vscale x 1 x i16> %b
@@ -668,15 +778,19 @@ define <vscale x 2 x i16> @shufflevector_nxv2i16_0() {
; RV32-LABEL: name: shufflevector_nxv2i16_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i16_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i16> %a
@@ -686,15 +800,19 @@ define <vscale x 2 x i16> @shufflevector_nxv2i16_1() {
; RV32-LABEL: name: shufflevector_nxv2i16_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i16_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s16>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i32> undef
ret <vscale x 2 x i16> %a
@@ -706,9 +824,10 @@ define <vscale x 2 x i16> @shufflevector_nxv2i16_2(<vscale x 2 x i16> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s16>), [[DEF]], shufflemask(0, 0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i16_2
@@ -716,9 +835,10 @@ define <vscale x 2 x i16> @shufflevector_nxv2i16_2(<vscale x 2 x i16> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s16>), [[DEF]], shufflemask(0, 0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 2 x i16> %a , <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i16> %b
@@ -728,15 +848,19 @@ define <vscale x 4 x i16> @shufflevector_nxv4i16_0() {
; RV32-LABEL: name: shufflevector_nxv4i16_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv4i16_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i16> %a
@@ -746,15 +870,19 @@ define <vscale x 4 x i16> @shufflevector_nxv4i16_1() {
; RV32-LABEL: name: shufflevector_nxv4i16_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv4i16_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i32> undef
ret <vscale x 4 x i16> %a
@@ -766,9 +894,10 @@ define <vscale x 4 x i16> @shufflevector_nxv4i16_2(<vscale x 4 x i16> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s16>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv4i16_2
@@ -776,9 +905,10 @@ define <vscale x 4 x i16> @shufflevector_nxv4i16_2(<vscale x 4 x i16> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s16>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 4 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 4 x i16> %a , <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i16> %b
@@ -788,15 +918,19 @@ define <vscale x 8 x i16> @shufflevector_nxv8i16_0() {
; RV32-LABEL: name: shufflevector_nxv8i16_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv8i16_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i16> %a
@@ -806,15 +940,19 @@ define <vscale x 8 x i16> @shufflevector_nxv8i16_1() {
; RV32-LABEL: name: shufflevector_nxv8i16_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv8i16_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i32> undef
ret <vscale x 8 x i16> %a
@@ -826,9 +964,10 @@ define <vscale x 8 x i16> @shufflevector_nxv8i16_2(<vscale x 8 x i16> %a) {
; RV32-NEXT: liveins: $v8m2
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv8i16_2
@@ -836,9 +975,10 @@ define <vscale x 8 x i16> @shufflevector_nxv8i16_2(<vscale x 8 x i16> %a) {
; RV64-NEXT: liveins: $v8m2
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 8 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m2
%b = shufflevector <vscale x 8 x i16> %a , <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i16> %b
@@ -848,15 +988,19 @@ define <vscale x 16 x i16> @shufflevector_nxv16i16_0() {
; RV32-LABEL: name: shufflevector_nxv16i16_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv16i16_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m4
%a = shufflevector <vscale x 16 x i16> poison, <vscale x 16 x i16> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i16> %a
@@ -866,15 +1010,19 @@ define <vscale x 16 x i16> @shufflevector_nxv16i16_1() {
; RV32-LABEL: name: shufflevector_nxv16i16_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv16i16_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s16>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m4
%a = shufflevector <vscale x 16 x i16> undef, <vscale x 16 x i16> undef, <vscale x 16 x i32> undef
ret <vscale x 16 x i16> %a
@@ -886,9 +1034,10 @@ define <vscale x 16 x i16> @shufflevector_nxv16i16_2(<vscale x 16 x i16> %a) {
; RV32-NEXT: liveins: $v8m4
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s16>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv16i16_2
@@ -896,9 +1045,10 @@ define <vscale x 16 x i16> @shufflevector_nxv16i16_2(<vscale x 16 x i16> %a) {
; RV64-NEXT: liveins: $v8m4
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s16>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 16 x s16>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s16>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m4
%b = shufflevector <vscale x 16 x i16> %a , <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i16> %b
@@ -908,15 +1058,19 @@ define <vscale x 1 x i32> @shufflevector_nxv1i32_0() {
; RV32-LABEL: name: shufflevector_nxv1i32_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i32_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i32> %a
@@ -926,15 +1080,19 @@ define <vscale x 1 x i32> @shufflevector_nxv1i32_1() {
; RV32-LABEL: name: shufflevector_nxv1i32_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i32_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s32>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef
ret <vscale x 1 x i32> %a
@@ -946,9 +1104,10 @@ define <vscale x 1 x i32> @shufflevector_nxv1i32_2(<vscale x 1 x i32> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s32>), [[DEF]], shufflemask(0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i32_2
@@ -956,9 +1115,10 @@ define <vscale x 1 x i32> @shufflevector_nxv1i32_2(<vscale x 1 x i32> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s32>), [[DEF]], shufflemask(0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 1 x i32> %a , <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
ret <vscale x 1 x i32> %b
@@ -968,15 +1128,19 @@ define <vscale x 2 x i32> @shufflevector_nxv2i32_0() {
; RV32-LABEL: name: shufflevector_nxv2i32_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i32_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i32> %a
@@ -986,15 +1150,19 @@ define <vscale x 2 x i32> @shufflevector_nxv2i32_1() {
; RV32-LABEL: name: shufflevector_nxv2i32_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i32_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s32>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef
ret <vscale x 2 x i32> %a
@@ -1006,9 +1174,10 @@ define <vscale x 2 x i32> @shufflevector_nxv2i32_2(<vscale x 2 x i32> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s32>), [[DEF]], shufflemask(0, 0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv2i32_2
@@ -1016,9 +1185,10 @@ define <vscale x 2 x i32> @shufflevector_nxv2i32_2(<vscale x 2 x i32> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s32>), [[DEF]], shufflemask(0, 0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 2 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 2 x i32> %a , <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i32> %b
@@ -1028,15 +1198,19 @@ define <vscale x 4 x i32> @shufflevector_nxv4i32_0() {
; RV32-LABEL: name: shufflevector_nxv4i32_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv4i32_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i32> %a
@@ -1046,15 +1220,19 @@ define <vscale x 4 x i32> @shufflevector_nxv4i32_1() {
; RV32-LABEL: name: shufflevector_nxv4i32_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv4i32_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef
ret <vscale x 4 x i32> %a
@@ -1066,9 +1244,10 @@ define <vscale x 4 x i32> @shufflevector_nxv4i32_2(<vscale x 4 x i32> %a) {
; RV32-NEXT: liveins: $v8m2
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv4i32_2
@@ -1076,9 +1255,10 @@ define <vscale x 4 x i32> @shufflevector_nxv4i32_2(<vscale x 4 x i32> %a) {
; RV64-NEXT: liveins: $v8m2
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 4 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m2
%b = shufflevector <vscale x 4 x i32> %a , <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i32> %b
@@ -1088,15 +1268,19 @@ define <vscale x 8 x i32> @shufflevector_nxv8i32_0() {
; RV32-LABEL: name: shufflevector_nxv8i32_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv8i32_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m4
%a = shufflevector <vscale x 8 x i32> poison, <vscale x 8 x i32> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i32> %a
@@ -1106,15 +1290,19 @@ define <vscale x 8 x i32> @shufflevector_nxv8i32_1() {
; RV32-LABEL: name: shufflevector_nxv8i32_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv8i32_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m4
%a = shufflevector <vscale x 8 x i32> undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef
ret <vscale x 8 x i32> %a
@@ -1126,9 +1314,10 @@ define <vscale x 8 x i32> @shufflevector_nxv8i32_2(<vscale x 8 x i32> %a) {
; RV32-NEXT: liveins: $v8m4
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv8i32_2
@@ -1136,9 +1325,10 @@ define <vscale x 8 x i32> @shufflevector_nxv8i32_2(<vscale x 8 x i32> %a) {
; RV64-NEXT: liveins: $v8m4
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 8 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m4
%b = shufflevector <vscale x 8 x i32> %a , <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i32> %b
@@ -1148,15 +1338,19 @@ define <vscale x 16 x i32> @shufflevector_nxv16i32_0() {
; RV32-LABEL: name: shufflevector_nxv16i32_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
; RV64-LABEL: name: shufflevector_nxv16i32_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m8
%a = shufflevector <vscale x 16 x i32> poison, <vscale x 16 x i32> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i32> %a
@@ -1166,15 +1360,19 @@ define <vscale x 16 x i32> @shufflevector_nxv16i32_1() {
; RV32-LABEL: name: shufflevector_nxv16i32_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
; RV64-LABEL: name: shufflevector_nxv16i32_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s32>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m8
%a = shufflevector <vscale x 16 x i32> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
ret <vscale x 16 x i32> %a
@@ -1186,9 +1384,10 @@ define <vscale x 16 x i32> @shufflevector_nxv16i32_2(<vscale x 16 x i32> %a) {
; RV32-NEXT: liveins: $v8m8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s32>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
; RV64-LABEL: name: shufflevector_nxv16i32_2
@@ -1196,9 +1395,10 @@ define <vscale x 16 x i32> @shufflevector_nxv16i32_2(<vscale x 16 x i32> %a) {
; RV64-NEXT: liveins: $v8m8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 16 x s32>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 16 x s32>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 16 x s32>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m8
%b = shufflevector <vscale x 16 x i32> %a , <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i32> %b
@@ -1208,15 +1408,19 @@ define <vscale x 1 x i64> @shufflevector_nxv1i64_0() {
; RV32-LABEL: name: shufflevector_nxv1i64_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i64_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i64> %a
@@ -1226,15 +1430,19 @@ define <vscale x 1 x i64> @shufflevector_nxv1i64_1() {
; RV32-LABEL: name: shufflevector_nxv1i64_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i64_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 1 x s64>), [[DEF]], shufflemask(undef)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 1 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; RV64-NEXT: PseudoRET implicit $v8
%a = shufflevector <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i32> undef
ret <vscale x 1 x i64> %a
@@ -1246,9 +1454,10 @@ define <vscale x 1 x i64> @shufflevector_nxv1i64_2(<vscale x 1 x i64> %a) {
; RV32-NEXT: liveins: $v8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s64>), [[DEF]], shufflemask(0)
- ; RV32-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; RV32-NEXT: PseudoRET implicit $v8
;
; RV64-LABEL: name: shufflevector_nxv1i64_2
@@ -1256,9 +1465,10 @@ define <vscale x 1 x i64> @shufflevector_nxv1i64_2(<vscale x 1 x i64> %a) {
; RV64-NEXT: liveins: $v8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 1 x s64>), [[DEF]], shufflemask(0)
- ; RV64-NEXT: $v8 = COPY [[SHUF]](<vscale x 1 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 1 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; RV64-NEXT: PseudoRET implicit $v8
%b = shufflevector <vscale x 1 x i64> %a , <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
ret <vscale x 1 x i64> %b
@@ -1268,15 +1478,19 @@ define <vscale x 2 x i64> @shufflevector_nxv2i64_0() {
; RV32-LABEL: name: shufflevector_nxv2i64_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv2i64_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i64> %a
@@ -1286,15 +1500,19 @@ define <vscale x 2 x i64> @shufflevector_nxv2i64_1() {
; RV32-LABEL: name: shufflevector_nxv2i64_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv2i64_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 2 x s64>), [[DEF]], shufflemask(undef, undef)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 2 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m2
%a = shufflevector <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i32> undef
ret <vscale x 2 x i64> %a
@@ -1306,9 +1524,10 @@ define <vscale x 2 x i64> @shufflevector_nxv2i64_2(<vscale x 2 x i64> %a) {
; RV32-NEXT: liveins: $v8m2
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s64>), [[DEF]], shufflemask(0, 0)
- ; RV32-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: shufflevector_nxv2i64_2
@@ -1316,9 +1535,10 @@ define <vscale x 2 x i64> @shufflevector_nxv2i64_2(<vscale x 2 x i64> %a) {
; RV64-NEXT: liveins: $v8m2
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 2 x s64>), [[DEF]], shufflemask(0, 0)
- ; RV64-NEXT: $v8m2 = COPY [[SHUF]](<vscale x 2 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 2 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m2
%b = shufflevector <vscale x 2 x i64> %a , <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i64> %b
@@ -1328,15 +1548,19 @@ define <vscale x 4 x i64> @shufflevector_nxv4i64_0() {
; RV32-LABEL: name: shufflevector_nxv4i64_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv4i64_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m4
%a = shufflevector <vscale x 4 x i64> poison, <vscale x 4 x i64> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i64> %a
@@ -1346,15 +1570,19 @@ define <vscale x 4 x i64> @shufflevector_nxv4i64_1() {
; RV32-LABEL: name: shufflevector_nxv4i64_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv4i64_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 4 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 4 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m4
%a = shufflevector <vscale x 4 x i64> undef, <vscale x 4 x i64> undef, <vscale x 4 x i32> undef
ret <vscale x 4 x i64> %a
@@ -1366,9 +1594,10 @@ define <vscale x 4 x i64> @shufflevector_nxv4i64_2(<vscale x 4 x i64> %a) {
; RV32-NEXT: liveins: $v8m4
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s64>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV32-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m4
;
; RV64-LABEL: name: shufflevector_nxv4i64_2
@@ -1376,9 +1605,10 @@ define <vscale x 4 x i64> @shufflevector_nxv4i64_2(<vscale x 4 x i64> %a) {
; RV64-NEXT: liveins: $v8m4
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 4 x s64>), [[DEF]], shufflemask(0, 0, 0, 0)
- ; RV64-NEXT: $v8m4 = COPY [[SHUF]](<vscale x 4 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m4
%b = shufflevector <vscale x 4 x i64> %a , <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i64> %b
@@ -1388,15 +1618,19 @@ define <vscale x 8 x i64> @shufflevector_nxv8i64_0() {
; RV32-LABEL: name: shufflevector_nxv8i64_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
; RV64-LABEL: name: shufflevector_nxv8i64_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8
%a = shufflevector <vscale x 8 x i64> poison, <vscale x 8 x i64> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i64> %a
@@ -1406,15 +1640,19 @@ define <vscale x 8 x i64> @shufflevector_nxv8i64_1() {
; RV32-LABEL: name: shufflevector_nxv8i64_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
; RV64-LABEL: name: shufflevector_nxv8i64_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 8 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 8 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8
%a = shufflevector <vscale x 8 x i64> undef, <vscale x 8 x i64> undef, <vscale x 8 x i32> undef
ret <vscale x 8 x i64> %a
@@ -1426,9 +1664,10 @@ define <vscale x 8 x i64> @shufflevector_nxv8i64_2(<vscale x 8 x i64> %a) {
; RV32-NEXT: liveins: $v8m8
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m8
;
; RV64-LABEL: name: shufflevector_nxv8i64_2
@@ -1436,9 +1675,10 @@ define <vscale x 8 x i64> @shufflevector_nxv8i64_2(<vscale x 8 x i64> %a) {
; RV64-NEXT: liveins: $v8m8
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<vscale x 8 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: $v8m8 = COPY [[SHUF]](<vscale x 8 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 8 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8
%b = shufflevector <vscale x 8 x i64> %a , <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i64> %b
@@ -1448,8 +1688,10 @@ define <vscale x 16 x i64> @shufflevector_nxv16i64_0() {
; RV32-LABEL: name: shufflevector_nxv16i64_0
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SPLAT_VECTOR]](<vscale x 16 x s64>)
; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
@@ -1457,8 +1699,10 @@ define <vscale x 16 x i64> @shufflevector_nxv16i64_0() {
; RV64-LABEL: name: shufflevector_nxv16i64_0
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SPLAT_VECTOR]](<vscale x 16 x s64>)
; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
@@ -1470,8 +1714,10 @@ define <vscale x 16 x i64> @shufflevector_nxv16i64_1() {
; RV32-LABEL: name: shufflevector_nxv16i64_1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SPLAT_VECTOR]](<vscale x 16 x s64>)
; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
@@ -1479,8 +1725,10 @@ define <vscale x 16 x i64> @shufflevector_nxv16i64_1() {
; RV64-LABEL: name: shufflevector_nxv16i64_1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[DEF]](<vscale x 16 x s64>), [[DEF]], shufflemask(undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef, undef)
- ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[DEF]](<vscale x 16 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SPLAT_VECTOR]](<vscale x 16 x s64>)
; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
@@ -1496,9 +1744,10 @@ define <vscale x 16 x i64> @shufflevector_nxv16i64_2(<vscale x 16 x i64> %a) {
; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
; RV32-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s64>), [[COPY1]](<vscale x 8 x s64>)
- ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<vscale x 16 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[CONCAT_VECTORS]](<vscale x 16 x s64>), [[C]](s64)
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SPLAT_VECTOR]](<vscale x 16 x s64>)
; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
@@ -1510,9 +1759,10 @@ define <vscale x 16 x i64> @shufflevector_nxv16i64_2(<vscale x 16 x i64> %a) {
; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
; RV64-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<vscale x 16 x s64>) = G_CONCAT_VECTORS [[COPY]](<vscale x 8 x s64>), [[COPY1]](<vscale x 8 x s64>)
- ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[SHUF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SHUFFLE_VECTOR [[CONCAT_VECTORS]](<vscale x 16 x s64>), [[DEF]], shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
- ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SHUF]](<vscale x 16 x s64>)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[CONCAT_VECTORS]](<vscale x 16 x s64>), [[C]](s64)
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[SPLAT_VECTOR]](<vscale x 16 x s64>)
; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
>From 26ba700b01facc3f4b142c1be22fd12e6be7249c Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 7 Feb 2024 07:31:15 -0800
Subject: [PATCH 4/6] !fixup undo scalable vector shuffle vector in verifier
---
.../CodeGen/GlobalISel/MachineIRBuilder.h | 3 +-
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 6 ++-
llvm/lib/CodeGen/MachineVerifier.cpp | 51 ++++++++++---------
.../MachineVerifier/test_g_shuffle_vector.mir | 11 ----
.../MachineVerifier/test_g_splat_vector.mir | 33 ++++++++++++
5 files changed, 65 insertions(+), 39 deletions(-)
create mode 100644 llvm/test/MachineVerifier/test_g_splat_vector.mir
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 634403b069aa8..15095d3e1ec36 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -1106,8 +1106,7 @@ class MachineIRBuilder {
/// \pre \p Val must be a generic virtual register with scalar type.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildSplatVector(const DstOp &Res,
- const SrcOp &Val);
+ MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val);
/// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
///
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index ed2687f0a8e64..156ec9414f5c2 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1643,8 +1643,10 @@ bool IRTranslator::translateGetElementPtr(const User &U,
LLT IdxTy = MRI->getType(IdxReg);
if (IdxTy != OffsetTy) {
if (!IdxTy.isVector() && WantSplatVector) {
- IdxReg = MIRBuilder.buildBuildVectorSplatVector(
- OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
+ IdxReg = MIRBuilder
+ .buildBuildVectorSplatVector(
+ OffsetTy.changeElementType(IdxTy), IdxReg)
+ .getReg(0);
}
IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 8ccd4192a0014..cf8b31c13bd19 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1618,39 +1618,42 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
// Don't check that all operands are vector because scalars are used in
// place of 1 element vectors.
- ElementCount SrcNumElts = Src0Ty.isVector() ? Src0Ty.getElementCount()
- : ElementCount::getFixed(1);
- ElementCount DstNumElts =
- DstTy.isVector() ? DstTy.getElementCount() : ElementCount::getFixed(1);
+ int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
+ int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
- // For scalable vectors, there is an entry in the Mask for each
- // KnownMinValue.
- if (MaskIdxes.size() != DstNumElts.getKnownMinValue())
+ if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
report("Wrong result type for shufflemask", MI);
- if (Src0Ty.isScalableVector()) {
- if (!llvm::all_of(MaskIdxes,
- [&MaskIdxes](int M) { return M == MaskIdxes[0]; }))
- report("Elements of a scalable G_SHUFFLE_VECTOR mask must match", MI);
- if (MaskIdxes[0] != 0 && MaskIdxes[0] != -1)
- report("Elements of a scalable G_SHUFFLE_VECTOR mask must be zero or "
- "undef",
- MI);
- } else {
- // Idxes for fixed vectors must be in bounds or undef, which is
- // represented as -1.
- for (int Idx : MaskIdxes) {
- if (Idx < 0)
- continue;
- if ((unsigned)Idx >= 2 * SrcNumElts.getFixedValue())
- report("Out of bounds shuffle index", MI);
- }
+ for (int Idx : MaskIdxes) {
+ if (Idx < 0)
+ continue;
+
+ if (Idx >= 2 * SrcNumElts)
+ report("Out of bounds shuffle index", MI);
}
break;
}
+
+ case TargetOpcode::G_SPLAT_VECTOR : {
+ LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
+ LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
+
+ if (!DstTy.isVector())
+ report("Destination type must be a vector", MI);
+
+ if (!SrcTy.isScalar())
+ report("Source type must be a scalar", MI);
+
+ if (DstTy.getScalarType() != SrcTy)
+ report("Element type of the destination must be the same type as the "
+ "source type",
+ MI);
+
+ break;
+ }
case TargetOpcode::G_DYN_STACKALLOC: {
const MachineOperand &DstOp = MI->getOperand(0);
const MachineOperand &AllocOp = MI->getOperand(1);
diff --git a/llvm/test/MachineVerifier/test_g_shuffle_vector.mir b/llvm/test/MachineVerifier/test_g_shuffle_vector.mir
index 41f46d0c184ee..6aba6731c9fee 100644
--- a/llvm/test/MachineVerifier/test_g_shuffle_vector.mir
+++ b/llvm/test/MachineVerifier/test_g_shuffle_vector.mir
@@ -56,15 +56,4 @@ body: |
%22:_(s32) = G_IMPLICIT_DEF
%20:_(<2 x s32>) = G_SHUFFLE_VECTOR %22, %22, shufflemask(0, 2)
- %23:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
- %24:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
-
- ; CHECK: Bad machine code: Wrong result type for shufflemask
- %25:_(<vscale x 1 x s32>) = G_SHUFFLE_VECTOR %23, %23, shufflemask(0, 0)
-
- ; CHECK: Elements of a scalable G_SHUFFLE_VECTOR mask must match
- %26:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR %24, %24, shufflemask(0, -1)
-
- ; CHECK: Bad machine code: Elements of a scalable G_SHUFFLE_VECTOR mask must be zero or undef
- %27:_(<vscale x 2 x s32>) = G_SHUFFLE_VECTOR %24, %24, shufflemask(1, 1)
...
diff --git a/llvm/test/MachineVerifier/test_g_splat_vector.mir b/llvm/test/MachineVerifier/test_g_splat_vector.mir
new file mode 100644
index 0000000000000..6fdb937777485
--- /dev/null
+++ b/llvm/test/MachineVerifier/test_g_splat_vector.mir
@@ -0,0 +1,33 @@
+# RUN: not --crash llc -o - -mtriple=arm64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+---
+name: g_splat_vector
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+ %0:_(<2 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s32) = G_CONSTANT i32 1
+ %3:_(<2 x s32>) = G_BUILD_VECTOR %1, %2
+
+ ; CHECK: Destination type must be a vector
+ %4:_(s32) = G_SPLAT_VECTOR %1
+
+ ; CHECK: Source type must be a scalar
+ %5:_(<2 x s32>) = G_SPLAT_VECTOR %3
+
+ ; CHECK: Element type of the destination must be the same type as the source type
+ %6:_(<2 x s64>) = G_SPLAT_VECTOR %5
+
+ %7:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+
+ ; CHECK: Destination type must be a vector
+ %8:_(s32) = G_SPLAT_VECTOR %7
+
+ ; CHECK: Source type must be a scalar
+ %9:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %7
+
+ ; CHECK: Element type of the destination must be the same type as the source type
+ %10:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %7
+...
>From 2136bb0a73f57e5d83f86ee0c0f3ba4da383b133 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 7 Feb 2024 14:22:26 -0800
Subject: [PATCH 5/6] fixup! respond to review comments
---
llvm/docs/GlobalISel/GenericOpcode.rst | 5 +++++
llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h | 3 +--
llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp | 6 ++----
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 8 ++++----
llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 2 +-
llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp | 9 ++++-----
llvm/lib/CodeGen/MachineVerifier.cpp | 2 +-
7 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/llvm/docs/GlobalISel/GenericOpcode.rst b/llvm/docs/GlobalISel/GenericOpcode.rst
index 26ff34376fb83..64317281a445c 100644
--- a/llvm/docs/GlobalISel/GenericOpcode.rst
+++ b/llvm/docs/GlobalISel/GenericOpcode.rst
@@ -639,6 +639,11 @@ Concatenate two vectors and shuffle the elements according to the mask operand.
The mask operand should be an IR Constant which exactly matches the
corresponding mask for the IR shufflevector instruction.
+G_SPLAT_VECTOR
+^^^^^^^^^^^^^^^^
+
+Create a vector where all elements are the scalar from the source operand.
+
Vector Reduction Operations
---------------------------
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 15095d3e1ec36..6762b1b360d5e 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -1063,8 +1063,7 @@ class MachineIRBuilder {
/// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
/// the number of elements
- MachineInstrBuilder buildBuildVectorSplatVector(const DstOp &Res,
- const SrcOp &Src);
+ MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src);
/// Build and insert \p Res = G_BUILD_VECTOR_TRUNC \p Op0, ...
///
diff --git a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
index 0289e753d5614..1869e0d41a51f 100644
--- a/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp
@@ -309,8 +309,7 @@ MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res,
// For vectors, CSE the element only for now.
LLT Ty = Res.getLLTTy(*getMRI());
if (Ty.isVector())
- return buildBuildVectorSplatVector(Res,
- buildConstant(Ty.getElementType(), Val));
+ return buildSplatBuildVector(Res, buildConstant(Ty.getElementType(), Val));
FoldingSetNodeID ID;
GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
@@ -337,8 +336,7 @@ MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res,
// For vectors, CSE the element only for now.
LLT Ty = Res.getLLTTy(*getMRI());
if (Ty.isVector())
- return buildBuildVectorSplatVector(
- Res, buildFConstant(Ty.getElementType(), Val));
+ return buildSplatBuildVector(Res, buildFConstant(Ty.getElementType(), Val));
FoldingSetNodeID ID;
GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 156ec9414f5c2..b16e1d6b37862 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1603,8 +1603,8 @@ bool IRTranslator::translateGetElementPtr(const User &U,
// are vectors.
if (WantSplatVector && !PtrTy.isVector()) {
BaseReg = MIRBuilder
- .buildBuildVectorSplatVector(
- LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
+ .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
+ BaseReg)
.getReg(0);
PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
PtrTy = getLLTForType(*PtrIRTy, *DL);
@@ -1644,8 +1644,8 @@ bool IRTranslator::translateGetElementPtr(const User &U,
if (IdxTy != OffsetTy) {
if (!IdxTy.isVector() && WantSplatVector) {
IdxReg = MIRBuilder
- .buildBuildVectorSplatVector(
- OffsetTy.changeElementType(IdxTy), IdxReg)
+ .buildSplatBuildVector(OffsetTy.changeElementType(IdxTy),
+ IdxReg)
.getReg(0);
}
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index fa1f52027411d..9898e48dc551d 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -8244,7 +8244,7 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
// For vector types create a G_BUILD_VECTOR.
if (Ty.isVector())
- Val = MIB.buildBuildVectorSplatVector(Ty, Val).getReg(0);
+ Val = MIB.buildSplatBuildVector(Ty, Val).getReg(0);
return Val;
}
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 4f244bea33501..8695470c36b85 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -321,7 +321,7 @@ MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
auto Const = buildInstr(TargetOpcode::G_CONSTANT)
.addDef(getMRI()->createGenericVirtualRegister(EltTy))
.addCImm(&Val);
- return buildBuildVectorSplatVector(Res, Const);
+ return buildSplatBuildVector(Res, Const);
}
auto Const = buildInstr(TargetOpcode::G_CONSTANT);
@@ -358,7 +358,7 @@ MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
.addDef(getMRI()->createGenericVirtualRegister(EltTy))
.addFPImm(&Val);
- return buildBuildVectorSplatVector(Res, Const);
+ return buildSplatBuildVector(Res, Const);
}
auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
@@ -706,9 +706,8 @@ MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
-MachineInstrBuilder
-MachineIRBuilder::buildBuildVectorSplatVector(const DstOp &Res,
- const SrcOp &Src) {
+MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
+ const SrcOp &Src) {
SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index cf8b31c13bd19..f7017efba808a 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -1637,7 +1637,7 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
break;
}
- case TargetOpcode::G_SPLAT_VECTOR : {
+ case TargetOpcode::G_SPLAT_VECTOR: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
>From b8b92ea4233d40c9227a8f5ad195f54e96aba39d Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 7 Feb 2024 15:08:46 -0800
Subject: [PATCH 6/6] fixup! fix failing tests
---
.../AArch64/GlobalISel/legalizer-info-validation.mir | 3 +++
llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp | 4 ++--
llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp | 6 +++---
3 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index c90c31aa27ef5..555d472ab5e64 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -622,6 +622,9 @@
# DEBUG-NEXT: G_SHUFFLE_VECTOR (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: G_SPLAT_VECTOR (opcode 217): 2 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
# DEBUG-NEXT: G_CTTZ (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
index 73837279701a9..33155d2c9a964 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerHelperTest.cpp
@@ -147,9 +147,9 @@ TEST_F(AArch64GISelMITest, LowerRotatesVector) {
LLT S32 = LLT::scalar(32);
LLT V4S32 = LLT::fixed_vector(4, S32);
auto SrcTrunc = B.buildTrunc(S32, Copies[0]);
- auto Src = B.buildSplatVector(V4S32, SrcTrunc);
+ auto Src = B.buildSplatBuildVector(V4S32, SrcTrunc);
auto AmtTrunc = B.buildTrunc(S32, Copies[1]);
- auto Amt = B.buildSplatVector(V4S32, AmtTrunc);
+ auto Amt = B.buildSplatBuildVector(V4S32, AmtTrunc);
auto ROTR = B.buildInstr(TargetOpcode::G_ROTR, {V4S32}, {Src, Amt});
AInfo Info(MF->getSubtarget());
diff --git a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
index f52e49df0bcde..59a86fa5646f3 100644
--- a/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
@@ -61,7 +61,7 @@ TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
LLT v4s64 = LLT::fixed_vector(4, s64);
MachineInstrBuilder FortyTwoSplat =
- B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
+ B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
int64_t Cst;
EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
EXPECT_EQ(Cst, 42);
@@ -625,7 +625,7 @@ TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
LLT v4s64 = LLT::fixed_vector(4, s64);
MachineInstrBuilder FortyTwoSplat =
- B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
+ B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
@@ -655,7 +655,7 @@ TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
LLT v4s64 = LLT::fixed_vector(4, s64);
MachineInstrBuilder FortyTwoSplat =
- B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
+ B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);
EXPECT_TRUE(
More information about the llvm-commits
mailing list