[llvm] [RISCV][GISEL] Add IRTranslation for insertelement with scalable vector type (PR #80377)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 7 15:17:17 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/80377
From 60a3b15d5b83a71a1622ca28ca685a6f19108d1f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 1 Feb 2024 17:51:23 -0800
Subject: [PATCH] [RISCV][GISEL] Add IRTranslation for insertelement with
scalable vector type
---
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 3 +-
.../CodeGen/GlobalISel/MachineIRBuilder.cpp | 4 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 +-
.../GlobalISel/irtranslator/insertelement.ll | 1941 +++++++++++++++++
4 files changed, 1948 insertions(+), 6 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index dd38317c26bff6..d8d6acd2b5a5c8 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2956,7 +2956,8 @@ bool IRTranslator::translateInsertElement(const User &U,
MachineIRBuilder &MIRBuilder) {
// If it is a <1 x Ty> vector, use the scalar as it is
// not a legal vector type in LLT.
- if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
+ if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
+ FVT && FVT->getNumElements() == 1)
return translateCopy(U, *U.getOperand(1), MIRBuilder);
Register Res = getOrCreateVReg(U);
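
A quick illustration of the IRTranslator change above (a sketch, not part of the patch): cast<FixedVectorType> is invalid when the insertelement result is a scalable vector, so the <1 x Ty> special case is now guarded with dyn_cast and scalable vectors fall through to the generic G_INSERT_VECTOR_ELT path. The helper name below is hypothetical and only for illustration.

// Sketch only; isSingleElementFixedVector is not part of this patch.
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

// Returns true only for fixed <1 x Ty> vectors. dyn_cast yields nullptr for
// ScalableVectorType, so scalable vectors skip the scalar-copy shortcut and
// are translated with G_INSERT_VECTOR_ELT instead.
static bool isSingleElementFixedVector(Type *Ty) {
  auto *FVT = dyn_cast<FixedVectorType>(Ty);
  return FVT && FVT->getNumElements() == 1;
}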
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index a5827c26c04f48..2596fa5f53cee3 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1282,8 +1282,8 @@ MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
SrcOps[1].getLLTTy(*getMRI()) &&
"Type mismatch");
assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
- assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
- SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
+ assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
+ SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
"Type mismatch");
break;
}
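
For context on the MachineIRBuilder assert (again a sketch, not taken from the patch): LLT::getNumElements() is only meaningful for fixed-length vectors, while getElementCount() also carries the scalable flag, so the relaxed assert covers both fixed and scalable G_INSERT_VECTOR_ELT destinations. The header path below is an assumption and may differ between LLVM versions.

#include "llvm/CodeGenTypes/LowLevelType.h" // assumed location of LLT
using namespace llvm;

// ElementCount equality compares both the minimum lane count and the
// scalable bit, e.g. <vscale x 4 x s8> and <4 x s8> do not match.
static bool sameVectorShape(LLT DstTy, LLT SrcTy) {
  return DstTy.getElementCount() == SrcTy.getElementCount();
}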
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 27037f4d5c5c85..c1ca9dac3033f7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20608,11 +20608,11 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
- // GISel support is in progress or complete for G_ADD, G_SUB, G_AND, G_OR, and
- // G_XOR.
+ // GISel support is in progress or complete for these opcodes.
unsigned Op = Inst.getOpcode();
if (Op == Instruction::Add || Op == Instruction::Sub ||
- Op == Instruction::And || Op == Instruction::Or || Op == Instruction::Xor)
+ Op == Instruction::And || Op == Instruction::Or ||
+ Op == Instruction::Xor || Op == Instruction::InsertElement)
return false;
if (Inst.getType()->isScalableTy())
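
The fallback change reads as follows (paraphrased sketch, not the actual function): whitelisted opcodes return before the scalable-type check, so a scalable-vector insertelement now stays on GlobalISel while other scalable-typed instructions still fall back to SelectionDAG.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Paraphrase of the gating logic; the remaining checks in the real
// fallBackToDAGISel are elided here.
static bool fallsBackToDAGISel(const Instruction &Inst) {
  unsigned Op = Inst.getOpcode();
  if (Op == Instruction::Add || Op == Instruction::Sub ||
      Op == Instruction::And || Op == Instruction::Or ||
      Op == Instruction::Xor || Op == Instruction::InsertElement)
    return false; // handled by GlobalISel, even for scalable vectors
  return Inst.getType()->isScalableTy();
}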
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
new file mode 100644
index 00000000000000..9255967a4da82e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
@@ -0,0 +1,1941 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i1> @insertelement_nxv1i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 1 x i1> poison, i1 0, i32 0
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @insertelement_nxv1i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 1 x i1> poison, i1 %x, i32 0
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @insertelement_nxv2i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 2 x i1> poison, i1 0, i32 0
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @insertelement_nxv2i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @insertelement_nxv2i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 2 x i1> poison, i1 %x, i32 0
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> poison, i1 0, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> poison, i1 %x, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @insertelement_nxv8i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 8 x i1> poison, i1 0, i32 0
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @insertelement_nxv8i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @insertelement_nxv8i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 8 x i1> poison, i1 %x, i32 0
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @insertelement_nxv16i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 16 x i1> poison, i1 0, i32 0
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @insertelement_nxv16i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @insertelement_nxv16i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 16 x i1> poison, i1 %x, i32 0
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_3(<vscale x 4 x i1> %v, i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i1_3
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v0, $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_3
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v0, $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> %v, i1 %x, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 1 x i8> @insertelement_nxv1i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i8> poison, i8 0, i32 0
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @insertelement_nxv1i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @insertelement_nxv1i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i8> poison, i8 %x, i32 0
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @insertelement_nxv2i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i8> poison, i8 0, i32 0
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @insertelement_nxv2i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @insertelement_nxv2i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i8> poison, i8 %x, i32 0
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> poison, i8 0, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> poison, i8 %x, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @insertelement_nxv8i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 8 x i8> poison, i8 0, i32 0
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @insertelement_nxv8i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @insertelement_nxv8i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 8 x i8> poison, i8 %x, i32 0
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @insertelement_nxv16i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 16 x i8> poison, i8 0, i32 0
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @insertelement_nxv16i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @insertelement_nxv16i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 16 x i8> poison, i8 %x, i32 0
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_3(<vscale x 4 x i8> %v, i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i8_3
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8, $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_3
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8, $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> %v, i8 %x, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 1 x i16> @insertelement_nxv1i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i16> poison, i16 0, i32 0
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @insertelement_nxv1i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @insertelement_nxv1i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i16> poison, i16 %x, i32 0
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @insertelement_nxv2i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i16> poison, i16 0, i32 0
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @insertelement_nxv2i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @insertelement_nxv2i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i16> poison, i16 %x, i32 0
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> poison, i16 0, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> poison, i16 %x, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @insertelement_nxv8i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 8 x i16> poison, i16 0, i32 0
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @insertelement_nxv8i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @insertelement_nxv8i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 8 x i16> poison, i16 %x, i32 0
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @insertelement_nxv16i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 16 x i16> poison, i16 0, i32 0
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @insertelement_nxv16i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @insertelement_nxv16i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 16 x i16> poison, i16 %x, i32 0
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16(<vscale x 4 x i16> %v, i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8, $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8, $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> %v, i16 %x, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 1 x i32> @insertelement_nxv1i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i32> poison, i32 0, i32 0
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @insertelement_nxv1i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @insertelement_nxv1i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @insertelement_nxv2i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i32> poison, i32 0, i32 0
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @insertelement_nxv2i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @insertelement_nxv2i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> poison, i32 0, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @insertelement_nxv8i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 8 x i32> poison, i32 0, i32 0
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @insertelement_nxv8i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @insertelement_nxv8i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @insertelement_nxv16i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 16 x i32> poison, i32 0, i32 0
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @insertelement_nxv16i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @insertelement_nxv16i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 16 x i32> poison, i32 %x, i32 0
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32(<vscale x 4 x i32> %v, i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $v8m2
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[COPY1]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $v8m2
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> %v, i32 %x, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 1 x i64> @insertelement_nxv1i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i64> poison, i64 0, i32 0
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @insertelement_nxv1i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @insertelement_nxv1i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i64> poison, i64 %x, i32 0
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @insertelement_nxv2i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 2 x i64> poison, i64 0, i32 0
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @insertelement_nxv2i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @insertelement_nxv2i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> poison, i64 0, i32 0
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> poison, i64 %x, i32 0
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @insertelement_nxv8i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @insertelement_nxv8i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @insertelement_nxv8i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 8 x i64> poison, i64 %x, i32 0
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 16 x i64> @insertelement_nxv16i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = insertelement <vscale x 16 x i64> poison, i64 0, i32 0
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 16 x i64> @insertelement_nxv16i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = insertelement <vscale x 16 x i64> poison, i64 -1, i32 0
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 16 x i64> @insertelement_nxv16i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = insertelement <vscale x 16 x i64> poison, i64 %x, i32 0
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64(<vscale x 4 x i64> %v, i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $v8m4
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[COPY]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $v8m4
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[COPY]], [[COPY1]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> %v, i64 %x, i32 0
+ ret <vscale x 4 x i64> %a
+}