[llvm] [RISCV][GISEL] Add IRTranslation for insertelement with scalable vector type (PR #80377)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 12 12:16:48 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/80377
From dc75d0f9d79d8efa1eaa69dca615454e73e63e82 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 1 Feb 2024 17:51:23 -0800
Subject: [PATCH 1/2] [RISCV][GISEL] Add IRTranslation for insertelement with
scalable vector type
---
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 3 +-
.../CodeGen/GlobalISel/MachineIRBuilder.cpp | 4 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 +-
.../GlobalISel/irtranslator/insertelement.ll | 1941 +++++++++++++++++
4 files changed, 1948 insertions(+), 6 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index c1d8e890a66edb..87766ce2b1401b 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2963,7 +2963,8 @@ bool IRTranslator::translateInsertElement(const User &U,
MachineIRBuilder &MIRBuilder) {
// If it is a <1 x Ty> vector, use the scalar as it is
// not a legal vector type in LLT.
- if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
+ if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
+ FVT && FVT->getNumElements() == 1)
return translateCopy(U, *U.getOperand(1), MIRBuilder);
Register Res = getOrCreateVReg(U);
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index a5827c26c04f48..2596fa5f53cee3 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1282,8 +1282,8 @@ MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
SrcOps[1].getLLTTy(*getMRI()) &&
"Type mismatch");
assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
- assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
- SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
+ assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
+ SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
"Type mismatch");
break;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 12c0cd53514dae..df49f658051624 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20629,11 +20629,11 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
- // GISel support is in progress or complete for G_ADD, G_SUB, G_AND, G_OR, and
- // G_XOR.
+ // GISel support is in progress or complete for these opcodes.
unsigned Op = Inst.getOpcode();
if (Op == Instruction::Add || Op == Instruction::Sub ||
- Op == Instruction::And || Op == Instruction::Or || Op == Instruction::Xor)
+ Op == Instruction::And || Op == Instruction::Or ||
+ Op == Instruction::Xor || Op == Instruction::InsertElement)
return false;
if (Inst.getType()->isScalableTy())
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
new file mode 100644
index 00000000000000..9255967a4da82e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
@@ -0,0 +1,1941 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i1> @insertelement_nxv1i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 1 x i1> poison, i1 0, i32 0
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @insertelement_nxv1i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 1 x i1> poison, i1 %x, i32 0
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @insertelement_nxv2i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 2 x i1> poison, i1 0, i32 0
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @insertelement_nxv2i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @insertelement_nxv2i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 2 x i1> poison, i1 %x, i32 0
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> poison, i1 0, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> poison, i1 %x, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @insertelement_nxv8i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 8 x i1> poison, i1 0, i32 0
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @insertelement_nxv8i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @insertelement_nxv8i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 8 x i1> poison, i1 %x, i32 0
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @insertelement_nxv16i1_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i1_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i1_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 16 x i1> poison, i1 0, i32 0
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @insertelement_nxv16i1_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i1_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i1_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @insertelement_nxv16i1_2(i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i1_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i1_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 16 x i1> poison, i1 %x, i32 0
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 4 x i1> @insertelement_nxv4i1_3(<vscale x 4 x i1> %v, i1 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i1_3
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v0, $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i1_3
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v0, $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ %a = insertelement <vscale x 4 x i1> %v, i1 %x, i32 0
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 1 x i8> @insertelement_nxv1i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i8> poison, i8 0, i32 0
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @insertelement_nxv1i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @insertelement_nxv1i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i8> poison, i8 %x, i32 0
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @insertelement_nxv2i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i8> poison, i8 0, i32 0
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @insertelement_nxv2i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @insertelement_nxv2i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i8> poison, i8 %x, i32 0
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> poison, i8 0, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> poison, i8 %x, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @insertelement_nxv8i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 8 x i8> poison, i8 0, i32 0
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @insertelement_nxv8i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @insertelement_nxv8i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 8 x i8> poison, i8 %x, i32 0
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @insertelement_nxv16i8_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i8_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i8_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 16 x i8> poison, i8 0, i32 0
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @insertelement_nxv16i8_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i8_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i8_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @insertelement_nxv16i8_2(i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i8_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i8_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 16 x i8> poison, i8 %x, i32 0
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 4 x i8> @insertelement_nxv4i8_3(<vscale x 4 x i8> %v, i8 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i8_3
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8, $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i8_3
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8, $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s8>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i8> %v, i8 %x, i32 0
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 1 x i16> @insertelement_nxv1i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i16> poison, i16 0, i32 0
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @insertelement_nxv1i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @insertelement_nxv1i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i16> poison, i16 %x, i32 0
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @insertelement_nxv2i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i16> poison, i16 0, i32 0
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @insertelement_nxv2i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @insertelement_nxv2i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i16> poison, i16 %x, i32 0
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> poison, i16 0, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> poison, i16 %x, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @insertelement_nxv8i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 8 x i16> poison, i16 0, i32 0
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @insertelement_nxv8i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @insertelement_nxv8i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 8 x i16> poison, i16 %x, i32 0
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @insertelement_nxv16i16_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i16_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i16_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 16 x i16> poison, i16 0, i32 0
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @insertelement_nxv16i16_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i16_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i16_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @insertelement_nxv16i16_2(i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i16_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i16_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 16 x i16> poison, i16 %x, i32 0
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 4 x i16> @insertelement_nxv4i16(<vscale x 4 x i16> %v, i16 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $v8, $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s16), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $v8, $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s16), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 4 x i16> %v, i16 %x, i32 0
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 1 x i32> @insertelement_nxv1i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i32> poison, i32 0, i32 0
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @insertelement_nxv1i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @insertelement_nxv1i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @insertelement_nxv2i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i32> poison, i32 0, i32 0
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @insertelement_nxv2i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @insertelement_nxv2i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> poison, i32 0, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @insertelement_nxv8i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 8 x i32> poison, i32 0, i32 0
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @insertelement_nxv8i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @insertelement_nxv8i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @insertelement_nxv16i32_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i32_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i32_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 16 x i32> poison, i32 0, i32 0
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @insertelement_nxv16i32_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i32_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i32_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s32), [[C1]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @insertelement_nxv16i32_2(i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i32_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i32_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 16 x i32> poison, i32 %x, i32 0
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 4 x i32> @insertelement_nxv4i32(<vscale x 4 x i32> %v, i32 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $v8m2
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[COPY1]](s32), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $v8m2
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[TRUNC]](s32), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 4 x i32> %v, i32 %x, i32 0
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 1 x i64> @insertelement_nxv1i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv1i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i64> poison, i64 0, i32 0
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @insertelement_nxv1i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv1i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @insertelement_nxv1i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv1i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv1i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ %a = insertelement <vscale x 1 x i64> poison, i64 %x, i32 0
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @insertelement_nxv2i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv2i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 2 x i64> poison, i64 0, i32 0
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @insertelement_nxv2i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv2i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @insertelement_nxv2i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv2i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: insertelement_nxv2i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ %a = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv4i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> poison, i64 0, i32 0
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv4i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> poison, i64 %x, i32 0
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @insertelement_nxv8i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv8i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 8 x i64> poison, i64 0, i32 0
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @insertelement_nxv8i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv8i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @insertelement_nxv8i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv8i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv8i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[IVEC]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ %a = insertelement <vscale x 8 x i64> poison, i64 %x, i32 0
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 16 x i64> @insertelement_nxv16i64_0() {
+ ; RV32-LABEL: name: insertelement_nxv16i64_0
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i64_0
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = insertelement <vscale x 16 x i64> poison, i64 0, i32 0
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 16 x i64> @insertelement_nxv16i64_1() {
+ ; RV32-LABEL: name: insertelement_nxv16i64_1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i64_1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s64), [[C1]](s32)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = insertelement <vscale x 16 x i64> poison, i64 -1, i32 0
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 16 x i64> @insertelement_nxv16i64_2(i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv16i64_2
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
+ ; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV32-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV32-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ ;
+ ; RV64-LABEL: name: insertelement_nxv16i64_2
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s64>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s64), [[C]](s32)
+ ; RV64-NEXT: [[UV:%[0-9]+]]:_(<vscale x 8 x s64>), [[UV1:%[0-9]+]]:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES [[IVEC]](<vscale x 16 x s64>)
+ ; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
+ ; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
+ %a = insertelement <vscale x 16 x i64> poison, i64 %x, i32 0
+ ret <vscale x 16 x i64> %a
+}
+
+define <vscale x 4 x i64> @insertelement_nxv4i64(<vscale x 4 x i64> %v, i64 %x) {
+ ; RV32-LABEL: name: insertelement_nxv4i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: liveins: $x10, $x11, $v8m4
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[COPY]], [[MV]](s64), [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: insertelement_nxv4i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: liveins: $x10, $v8m4
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s64>) = G_INSERT_VECTOR_ELT [[COPY]], [[COPY1]](s64), [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[IVEC]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ %a = insertelement <vscale x 4 x i64> %v, i64 %x, i32 0
+ ret <vscale x 4 x i64> %a
+}
>From 035da4c5ef9b754932d53b7562dcbb455af35012 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 12 Feb 2024 12:13:52 -0800
Subject: [PATCH 2/2] fixup! better test coverage of idx argument
---
.../GlobalISel/irtranslator/insertelement.ll | 124 +++++++++---------
1 file changed, 65 insertions(+), 59 deletions(-)
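
(Note for readers of the archive, ignored by git-am: the fixup below replaces the constant-zero insertion index in several autogenerated tests with non-zero constants and with a runtime index argument. As a minimal sketch of the pattern being covered, mirroring the updated insertelement_nxv1i1_2 test in the hunks that follow:

define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x, i32 %idx) {
  ; Both the element value and the index are arguments, so the IRTranslator
  ; must emit G_INSERT_VECTOR_ELT with a non-constant index operand.
  %a = insertelement <vscale x 1 x i1> poison, i1 %x, i32 %idx
  ret <vscale x 1 x i1> %a
}
)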
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
index 9255967a4da82e..c23d1e7c70992b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/insertelement.ll
@@ -48,31 +48,32 @@ define <vscale x 1 x i1> @insertelement_nxv1i1_1() {
ret <vscale x 1 x i1> %a
}
-define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x) {
+define <vscale x 1 x i1> @insertelement_nxv1i1_2(i1 %x, i32 %idx) {
; RV32-LABEL: name: insertelement_nxv1i1_2
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: liveins: $x10, $x11
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: insertelement_nxv1i1_2
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: liveins: $x10, $x11
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 1 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[TRUNC1]](s32)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = insertelement <vscale x 1 x i1> poison, i1 %x, i32 0
+ %a = insertelement <vscale x 1 x i1> poison, i1 %x, i32 %idx
ret <vscale x 1 x i1> %a
}
@@ -81,7 +82,7 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_0() {
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
@@ -90,11 +91,11 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_0() {
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = insertelement <vscale x 2 x i1> poison, i1 0, i32 0
+ %a = insertelement <vscale x 2 x i1> poison, i1 0, i32 1
ret <vscale x 2 x i1> %a
}
@@ -120,31 +121,32 @@ define <vscale x 2 x i1> @insertelement_nxv2i1_1() {
ret <vscale x 2 x i1> %a
}
-define <vscale x 2 x i1> @insertelement_nxv2i1_2(i1 %x) {
+define <vscale x 2 x i1> @insertelement_nxv2i1_2(i1 %x, i32 %idx) {
; RV32-LABEL: name: insertelement_nxv2i1_2
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: liveins: $x10, $x11
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: insertelement_nxv2i1_2
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: liveins: $x10, $x11
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[TRUNC1]](s32)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = insertelement <vscale x 2 x i1> poison, i1 %x, i32 0
+ %a = insertelement <vscale x 2 x i1> poison, i1 %x, i32 %idx
ret <vscale x 2 x i1> %a
}
@@ -153,7 +155,7 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_0() {
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
@@ -162,11 +164,11 @@ define <vscale x 4 x i1> @insertelement_nxv4i1_0() {
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = insertelement <vscale x 4 x i1> poison, i1 0, i32 0
+ %a = insertelement <vscale x 4 x i1> poison, i1 0, i32 2
ret <vscale x 4 x i1> %a
}
@@ -264,31 +266,32 @@ define <vscale x 8 x i1> @insertelement_nxv8i1_1() {
ret <vscale x 8 x i1> %a
}
-define <vscale x 8 x i1> @insertelement_nxv8i1_2(i1 %x) {
+define <vscale x 8 x i1> @insertelement_nxv8i1_2(i1 %x, i32 %idx) {
; RV32-LABEL: name: insertelement_nxv8i1_2
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: liveins: $x10, $x11
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: insertelement_nxv8i1_2
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: liveins: $x10, $x11
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 8 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[TRUNC1]](s32)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = insertelement <vscale x 8 x i1> poison, i1 %x, i32 0
+ %a = insertelement <vscale x 8 x i1> poison, i1 %x, i32 %idx
ret <vscale x 8 x i1> %a
}
@@ -297,7 +300,7 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_0() {
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
@@ -306,11 +309,11 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_0() {
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s1), [[C1]](s32)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = insertelement <vscale x 16 x i1> poison, i1 0, i32 0
+ %a = insertelement <vscale x 16 x i1> poison, i1 0, i32 15
ret <vscale x 16 x i1> %a
}
@@ -336,31 +339,32 @@ define <vscale x 16 x i1> @insertelement_nxv16i1_1() {
ret <vscale x 16 x i1> %a
}
-define <vscale x 16 x i1> @insertelement_nxv16i1_2(i1 %x) {
+define <vscale x 16 x i1> @insertelement_nxv16i1_2(i1 %x, i32 %idx) {
; RV32-LABEL: name: insertelement_nxv16i1_2
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: liveins: $x10, $x11
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[COPY1]](s32)
; RV32-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: insertelement_nxv16i1_2
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: liveins: $x10, $x11
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+ ; RV64-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C]](s32)
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[TRUNC1]](s32)
; RV64-NEXT: $v0 = COPY [[IVEC]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = insertelement <vscale x 16 x i1> poison, i1 %x, i32 0
+ %a = insertelement <vscale x 16 x i1> poison, i1 %x, i32 %idx
ret <vscale x 16 x i1> %a
}
@@ -685,8 +689,8 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_0() {
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
- ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
@@ -694,11 +698,11 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_0() {
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
- ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s32)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s8), [[C1]](s64)
; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
- %a = insertelement <vscale x 16 x i8> poison, i8 0, i32 0
+ %a = insertelement <vscale x 16 x i8> poison, i8 0, i64 0
ret <vscale x 16 x i8> %a
}
@@ -724,31 +728,33 @@ define <vscale x 16 x i8> @insertelement_nxv16i8_1() {
ret <vscale x 16 x i8> %a
}
-define <vscale x 16 x i8> @insertelement_nxv16i8_2(i8 %x) {
+define <vscale x 16 x i8> @insertelement_nxv16i8_2(i8 %x, i64 %idx) {
; RV32-LABEL: name: insertelement_nxv16i8_2
; RV32: bb.1 (%ir-block.0):
- ; RV32-NEXT: liveins: $x10
+ ; RV32-NEXT: liveins: $x10, $x11, $x12
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+ ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[MV]](s64)
; RV32-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
; RV32-NEXT: PseudoRET implicit $v8m2
;
; RV64-LABEL: name: insertelement_nxv16i8_2
; RV64: bb.1 (%ir-block.0):
- ; RV64-NEXT: liveins: $x10
+ ; RV64-NEXT: liveins: $x10, $x11
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
- ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[C]](s32)
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 16 x s8>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s8), [[COPY1]](s64)
; RV64-NEXT: $v8m2 = COPY [[IVEC]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
- %a = insertelement <vscale x 16 x i8> poison, i8 %x, i32 0
+ %a = insertelement <vscale x 16 x i8> poison, i8 %x, i64 %idx
ret <vscale x 16 x i8> %a
}
@@ -857,8 +863,8 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_0() {
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV32-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
; RV32-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
; RV32-NEXT: PseudoRET implicit $v8
;
@@ -866,11 +872,11 @@ define <vscale x 2 x i16> @insertelement_nxv2i16_0() {
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
- ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s32)
+ ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+ ; RV64-NEXT: [[IVEC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_INSERT_VECTOR_ELT [[DEF]], [[C]](s16), [[C1]](s64)
; RV64-NEXT: $v8 = COPY [[IVEC]](<vscale x 2 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = insertelement <vscale x 2 x i16> poison, i16 0, i32 0
+ %a = insertelement <vscale x 2 x i16> poison, i16 0, i64 1
ret <vscale x 2 x i16> %a
}