[llvm] [RISCV][GISEL] Legalize, regbank select, and instruction select G_ZEXT, G_SEXT, G_ANYEXT, G_SPLAT_VECTOR, and G_ICMP (PR #85938)
Michael Maitland via llvm-commits
llvm-commits@lists.llvm.org
Tue Mar 26 17:44:25 PDT 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/85938
From e0f143e2bcc3be50dcb03ca3842a73c2dbcb7069 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland@gmail.com>
Date: Thu, 7 Mar 2024 13:40:30 -0800
Subject: [PATCH 1/9] [RISCV][GISEL] Legalize G_ZEXT, G_SEXT, and G_ANYEXT for
scalable vector types
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 53 +-
.../Target/RISCV/GISel/RISCVLegalizerInfo.h | 1 +
.../legalizer/rvv/legalize-anyext.mir | 761 ++++++++++++++++
.../legalizer/rvv/legalize-sext.mir | 811 ++++++++++++++++++
.../legalizer/rvv/legalize-zext.mir | 811 ++++++++++++++++++
5 files changed, 2432 insertions(+), 5 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 9a388f4cd2717b..9be7309dce7f56 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -139,18 +139,20 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, s32, sXLen)
.minScalarSameAs(1, 0);
+ auto &ExtActions =
+ getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
+ .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
+ .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
+ .maxScalar(0, sXLen);
if (ST.is64Bit()) {
- getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
- .legalFor({{sXLen, s32}})
- .maxScalar(0, sXLen);
+ ExtActions.legalFor({{sXLen, s32}});
getActionDefinitionsBuilder(G_SEXT_INREG)
.customFor({sXLen})
.maxScalar(0, sXLen)
.lower();
} else {
- getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}).maxScalar(0, sXLen);
-
getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower();
}
@@ -570,6 +572,43 @@ bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
}
+ MI.eraseFromParent();
+ return true;
+}
+
+// Custom-lower extensions from mask vectors by using a vselect either with 1
+// for zero/any-extension or -1 for sign-extension:
+// (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
+// Note that any-extension is lowered identically to zero-extension.
+bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+
+ unsigned Opc = MI.getOpcode();
+ assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
+ Opc == TargetOpcode::G_ANYEXT);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
+
+ LLT DstTy = MRI.getType(Dst);
+ LLT SrcTy = MRI.getType(Src);
+
+ // The only custom legalization of extends we handle are vector extends.
+ if (!DstTy.isVector() || !SrcTy.isVector())
+ return false;
+
+ // The only custom legalization of extends is from mask types.
+ if (SrcTy.getElementType().getSizeInBits() != 1)
+ return false;
+
+ int64_t ExtTrueVal =
+ Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_ANYEXT ? 1 : -1;
+ LLT DstEltTy = DstTy.getElementType();
+ auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
+ auto SplatTrue =
+ MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
+ MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);
MI.eraseFromParent();
return true;
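For reference, the builder calls above expand a mask extend into a pair of splats feeding a vector select. A sketch of the generic MIR this produces for a sign-extend of <vscale x 1 x s1> to <vscale x 1 x s8> (the virtual register names here are illustrative, not from the patch):

    %zero:_(s8) = G_CONSTANT i8 0
    %splat_zero:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %zero(s8)
    %true:_(s8) = G_CONSTANT i8 -1
    %splat_true:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %true(s8)
    %dst:_(<vscale x 1 x s8>) = G_SELECT %mask(<vscale x 1 x s1>), %splat_true, %splat_zero

For G_ZEXT and G_ANYEXT, ExtTrueVal is 1 instead of -1, so the true-side splat is a splat of 1.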
@@ -634,6 +673,10 @@ bool RISCVLegalizerInfo::legalizeCustom(
return legalizeVAStart(MI, MIRBuilder);
case TargetOpcode::G_VSCALE:
return legalizeVScale(MI, MIRBuilder);
+ case TargetOpcode::G_ZEXT:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ANYEXT:
+ return legalizeExt(MI, MIRBuilder);
}
llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index e2a98c8d2c736c..a31bc482c69337 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -43,6 +43,7 @@ class RISCVLegalizerInfo : public LegalizerInfo {
bool legalizeVAStart(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
new file mode 100644
index 00000000000000..2935d79f6411ed
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-anyext.mir
@@ -0,0 +1,761 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+# Extend from s1 element vectors
+---
+name: anyext_nxv1i8_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i8_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i16_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i16_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i32_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i32_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i64_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i8_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i8_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i16_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i16_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i32_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i64_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i8_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i8_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i16_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i16_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i32_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i32_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i64_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i8_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i8_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv8i16_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i16_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv8i32_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i32_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i64_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i8_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv16i8_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv16i16_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv16i16_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv16i32_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv16i32_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv32i8_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv32i8_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_ANYEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv32i16_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv32i16_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv64i8_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv64i8_nxv64i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_ANYEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: anyext_nxv1i16_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i16_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i32_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i32_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i64_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i16_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i16_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i32_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i64_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i16_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i16_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i32_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i32_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i64_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i16_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i16_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv8i32_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i32_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i64_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i16_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv16i16_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv16i32_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv16i32_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv32i16_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv32i16_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: anyext_nxv1i32_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i32_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i64_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i32_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i64_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i32_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i32_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i64_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i32_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i32_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i64_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i32_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv16i32_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: anyext_nxv1i64_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv1i64_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv2i64_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv4i64_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: anyext_nxv8i64_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
new file mode 100644
index 00000000000000..61fa3f87f64308
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-sext.mir
@@ -0,0 +1,811 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+# Extend from s1 element vectors
+---
+name: sext_nxv1i8_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i8_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i16_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i16_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i32_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i32_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i64_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i8_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i8_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i16_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i16_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i32_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i64_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i8_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i8_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i16_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i16_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i32_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i32_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i64_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i8_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i8_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv8i16_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i16_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv8i32_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i32_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i64_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i8_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv16i8_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv16i16_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv16i16_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv16i32_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv16i32_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv32i8_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv32i8_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SEXT [[DEF]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv32i16_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv32i16_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SEXT [[DEF]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv64i8_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv64i8_nxv64i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SEXT [[DEF]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: sext_nxv1i16_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i16_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i32_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i32_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i64_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i16_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i16_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i32_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i64_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i16_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i16_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i32_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i32_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i64_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i16_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i16_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv8i32_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i32_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i64_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i16_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv16i16_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SEXT [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv16i32_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv16i32_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv32i16_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv32i16_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SEXT [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: sext_nxv1i32_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i32_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i64_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i32_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i64_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i32_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i32_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i64_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i32_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i32_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i64_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i32_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv16i32_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: sext_nxv1i64_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv1i64_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv2i64_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv4i64_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: sext_nxv8i64_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
new file mode 100644
index 00000000000000..47b2bd5bd07113
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-zext.mir
@@ -0,0 +1,811 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+# Extend from s1 element vectors
+---
+name: zext_nxv1i8_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i8_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i16_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i16_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i32_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i32_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i64_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i8_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i8_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i16_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i16_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i32_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i64_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i8_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i8_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i16_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i16_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i32_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i32_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i64_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i8_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i8_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv8i16_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i16_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv8i32_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i32_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i64_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i8_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv16i8_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv16i16_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv16i16_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv16i32_nxv16i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv16i32_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv32i8_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv32i8_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ZEXT [[DEF]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_ZEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv32i16_nxv32i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv32i16_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ZEXT [[DEF]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ZEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv64i8_nxv64i1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv64i8_nxv64i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ZEXT [[DEF]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_ZEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: zext_nxv1i16_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i16_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i32_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i32_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i64_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i16_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i16_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i32_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i64_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i16_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i16_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i32_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i32_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i64_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i16_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i16_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv8i32_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i32_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i64_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i16_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv16i16_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ZEXT [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ZEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv16i32_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv16i32_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv32i16_nxv32i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv32i16_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ZEXT [[DEF]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ZEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: zext_nxv1i32_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i32_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i64_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i32_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i64_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i32_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i32_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i64_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i32_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i32_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i64_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i32_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv16i32_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: zext_nxv1i64_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv1i64_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv2i64_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv4i64_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: zext_nxv8i64_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
>From 63b54cc66a850e7f35fdd17cff2f9d4feebe6daf Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 15 Mar 2024 07:35:55 -0700
Subject: [PATCH 2/9] [RISCV][GISEL] Regbankselect for G_ZEXT, G_SEXT, and
 G_ANYEXT with scalable vector types
---
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 11 +-
.../GlobalISel/regbankselect/rvv/anyext.mir | 1115 +++++++++++++++++
.../GlobalISel/regbankselect/rvv/sext.mir | 1115 +++++++++++++++++
.../GlobalISel/regbankselect/rvv/zext.mir | 1115 +++++++++++++++++
4 files changed, 3351 insertions(+), 5 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 888bcc46ea1ef9..aae4e37cf08f66 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -290,7 +290,10 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
switch (Opc) {
case TargetOpcode::G_ADD:
- case TargetOpcode::G_SUB: {
+ case TargetOpcode::G_SUB:
+ case TargetOpcode::G_ANYEXT:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT: {
if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
return getInstructionMapping(
@@ -298,8 +301,9 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue()),
NumOperands);
}
+ return getInstructionMapping(DefaultMappingID, /*Cost=*/1, GPRValueMapping,
+ NumOperands);
}
- LLVM_FALLTHROUGH;
case TargetOpcode::G_SHL:
case TargetOpcode::G_ASHR:
case TargetOpcode::G_LSHR:
@@ -321,9 +325,6 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case TargetOpcode::G_PTRTOINT:
case TargetOpcode::G_INTTOPTR:
case TargetOpcode::G_TRUNC:
- case TargetOpcode::G_ANYEXT:
- case TargetOpcode::G_SEXT:
- case TargetOpcode::G_ZEXT:
case TargetOpcode::G_SEXTLOAD:
case TargetOpcode::G_ZEXTLOAD:
return getInstructionMapping(DefaultMappingID, /*Cost=*/1, GPRValueMapping,
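
(Aside for reviewers less familiar with regbankselect: the net effect of the
getInstrMapping change above is that G_ANYEXT/G_SEXT/G_ZEXT now pick their
bank from the result type instead of always falling through to the GPR
mapping. A minimal standalone model of that decision is sketched below; the
names and types are illustrative only, not the real LLVM API.)

#include <cassert>
#include <cstdio>

// Toy model of the bank decision this patch introduces for
// G_ANYEXT/G_SEXT/G_ZEXT: vector-typed results map to the vector register
// bank (VRB), keyed on the known-minimum size of the possibly-scalable
// type; scalar results keep the default GPR mapping.
enum class Bank { GPR, VRB };

struct ToyLLT {
  bool IsVector;          // true for fixed and scalable vectors alike
  unsigned MinSizeInBits; // e.g. <vscale x 2 x s64> -> 128
};

static Bank selectExtBank(const ToyLLT &ResultTy) {
  return ResultTy.IsVector ? Bank::VRB : Bank::GPR;
}

int main() {
  ToyLLT NxV2S64{/*IsVector=*/true, /*MinSizeInBits=*/128};
  ToyLLT S32{/*IsVector=*/false, /*MinSizeInBits=*/32};
  assert(selectExtBank(NxV2S64) == Bank::VRB); // scalable vector -> vrb
  assert(selectExtBank(S32) == Bank::GPR);     // scalar -> gpr
  std::puts("bank selection model ok");
  return 0;
}

This is what the regbankselect tests below assert: every vector
G_ANYEXT/G_SEXT/G_ZEXT operand is annotated with vrb(...) on both RV32 and
RV64.
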
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir
new file mode 100644
index 00000000000000..b92279f4c1d942
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/anyext.mir
@@ -0,0 +1,1115 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+# Extend from s1 element vectors
+---
+name: anyext_nxv1i8_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i8_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i8_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i16_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i16_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i16_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i32_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i8_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i8_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i8_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i16_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i16_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i16_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i8_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i8_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i8_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i16_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i16_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i16_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i32_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i8_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i8_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i8_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv8i16_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i16_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i16_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv8i32_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i8_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i8_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ANYEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i8_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ANYEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv16i16_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i16_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ANYEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i16_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ANYEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv16i32_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv32i8_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv32i8_nxv32i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ANYEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv32i8_nxv32i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ANYEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_ANYEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv32i16_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv32i16_nxv32i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ANYEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv32i16_nxv32i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ANYEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv64i8_nxv64i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv64i8_nxv64i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ANYEXT [[DEF]](<vscale x 64 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv64i8_nxv64i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ANYEXT [[DEF]](<vscale x 64 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_ANYEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: anyext_nxv1i16_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i16_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ANYEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i16_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ANYEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i32_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i16_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i16_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ANYEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i16_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ANYEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i16_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i16_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ANYEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i16_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ANYEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv4i32_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i16_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i16_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ANYEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i16_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ANYEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv8i32_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i16_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i16_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ANYEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i16_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ANYEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ANYEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv16i32_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv32i16_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv32i16_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ANYEXT [[DEF]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv32i16_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ANYEXT [[DEF]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ANYEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: anyext_nxv1i32_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ANYEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ANYEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv1i64_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i32_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ANYEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ANYEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i32_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ANYEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ANYEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i32_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ANYEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ANYEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: anyext_nxv16i32_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[DEF]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ANYEXT [[DEF]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ANYEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: anyext_nxv1i64_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ANYEXT [[DEF]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: $v8 = COPY [[ANYEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ANYEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: anyext_nxv2i64_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ANYEXT [[DEF]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ANYEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ANYEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: anyext_nxv4i64_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ANYEXT [[DEF]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ANYEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ANYEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: anyext_nxv8i64_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ANYEXT [[DEF]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ANYEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ANYEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir
new file mode 100644
index 00000000000000..5a06efb1347b9b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/sext.mir
@@ -0,0 +1,1115 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+# Extend from s1 element vectors
+---
+name: sext_nxv1i8_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i8_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i8_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i16_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i16_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i16_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i32_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i32_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i32_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i64_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i64_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i8_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i8_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i8_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i16_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i16_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i16_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i32_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i32_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i64_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i64_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i8_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i8_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i8_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i16_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i16_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i16_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i32_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i32_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i32_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i64_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i64_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i8_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i8_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i8_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv8i16_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i16_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i16_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv8i32_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i32_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i32_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i64_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i64_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i8_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv16i8_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv16i8_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv16i16_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv16i16_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv16i16_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv16i32_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv16i32_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv16i32_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv32i8_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv32i8_nxv32i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv32i8_nxv32i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_SEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv32i16_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv32i16_nxv32i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv32i16_nxv32i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv64i8_nxv64i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv64i8_nxv64i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SEXT [[DEF]](<vscale x 64 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv64i8_nxv64i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SEXT [[DEF]](<vscale x 64 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_SEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: sext_nxv1i16_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i16_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i16_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i32_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i32_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i32_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i64_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i64_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i16_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i16_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i16_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i32_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i32_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i64_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i64_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i16_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i16_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i16_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv4i32_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i32_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i32_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i64_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i64_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i16_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i16_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i16_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv8i32_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i32_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i32_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i64_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i64_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i16_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv16i16_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv16i16_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_SEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv16i32_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv16i32_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv16i32_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv32i16_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv32i16_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SEXT [[DEF]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv32i16_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SEXT [[DEF]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_SEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: sext_nxv1i32_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i32_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i32_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_SEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv1i64_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i64_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i64_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i32_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i32_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i32_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_SEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i64_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i64_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i32_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i32_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i32_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_SEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i64_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i64_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i32_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i32_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i32_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_SEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i64_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i64_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: sext_nxv16i32_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv16i32_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv16i32_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SEXT [[DEF]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_SEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: sext_nxv1i64_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv1i64_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: sext_nxv1i64_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SEXT [[DEF]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: $v8 = COPY [[SEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_SEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: sext_nxv2i64_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv2i64_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: sext_nxv2i64_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SEXT [[DEF]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: $v8m2 = COPY [[SEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_SEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: sext_nxv4i64_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv4i64_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: sext_nxv4i64_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SEXT [[DEF]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: $v8m4 = COPY [[SEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_SEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: sext_nxv8i64_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: sext_nxv8i64_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: sext_nxv8i64_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[SEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SEXT [[DEF]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: $v8m8 = COPY [[SEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_SEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir
new file mode 100644
index 00000000000000..653133abbf148b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/zext.mir
@@ -0,0 +1,1115 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+# Extend from s1 element vectors
+---
+name: zext_nxv1i8_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i8_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i8_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s8>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i16_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i16_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i16_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i32_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i32_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i32_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i64_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i64_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s1>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i8_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i8_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i8_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s8>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i16_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i16_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i16_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i32_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i32_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i64_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i64_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s1>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i8_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i8_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i8_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s8>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i16_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i16_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i16_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i32_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i32_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i32_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i64_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i64_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s1>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i8_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i8_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i8_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s8>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv8i16_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i16_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i16_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv8i32_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i32_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i32_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i64_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i64_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s1>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i8_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i8_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i8_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s8>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv16i16_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i16_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i16_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv16i32_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i32_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i32_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s1>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv32i8_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv32i8_nxv32i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ZEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv32i8_nxv32i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_ZEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s8>) = G_ZEXT %1(<vscale x 32 x s1>)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv32i16_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv32i16_nxv32i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ZEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv32i16_nxv32i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ZEXT [[DEF]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ZEXT %1(<vscale x 32 x s1>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv64i8_nxv64i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv64i8_nxv64i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ZEXT [[DEF]](<vscale x 64 x s1>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv64i8_nxv64i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_ZEXT [[DEF]](<vscale x 64 x s1>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 64 x s8>) = G_ZEXT %1(<vscale x 64 x s1>)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s8 element vectors
+---
+name: zext_nxv1i16_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i16_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i16_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s16>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i32_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i32_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i32_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i64_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i64_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s8>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i16_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i16_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i16_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s16>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i32_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i32_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i64_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i64_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s8>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i16_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i16_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i16_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s16>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv4i32_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i32_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i32_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i64_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i64_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s8>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i16_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i16_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i16_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s16>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv8i32_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i32_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i32_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i64_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i64_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s8>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i16_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i16_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ZEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i16_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_ZEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s16>) = G_ZEXT %1(<vscale x 16 x s8>)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv16i32_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i32_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i32_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s8>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv32i16_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv32i16_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ZEXT [[DEF]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv32i16_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_ZEXT [[DEF]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 32 x s16>) = G_ZEXT %1(<vscale x 32 x s8>)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s16 element vectors
+---
+name: zext_nxv1i32_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i32_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i32_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_ZEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s32>) = G_ZEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv1i64_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i64_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i64_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s16>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i32_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i32_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i32_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_ZEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s32>) = G_ZEXT %1(<vscale x 2 x s16>)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i64_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i64_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s16>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i32_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i32_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i32_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_ZEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s32>) = G_ZEXT %1(<vscale x 4 x s16>)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i64_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i64_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s16>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i32_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i32_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i32_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_ZEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s32>) = G_ZEXT %1(<vscale x 8 x s16>)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i64_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i64_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s16>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
+---
+name: zext_nxv16i32_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i32_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i32_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_ZEXT [[DEF]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 16 x s32>) = G_ZEXT %1(<vscale x 16 x s16>)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+...
+
+# Extend from s32 element vectors
+---
+name: zext_nxv1i64_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i64_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i64_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_ZEXT [[DEF]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: $v8 = COPY [[ZEXT]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 1 x s64>) = G_ZEXT %1(<vscale x 1 x s32>)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+...
+---
+name: zext_nxv2i64_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i64_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i64_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_ZEXT [[DEF]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: $v8m2 = COPY [[ZEXT]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 2 x s64>) = G_ZEXT %1(<vscale x 2 x s32>)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+...
+---
+name: zext_nxv4i64_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i64_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i64_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_ZEXT [[DEF]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: $v8m4 = COPY [[ZEXT]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %1:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 4 x s64>) = G_ZEXT %1(<vscale x 4 x s32>)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+...
+---
+name: zext_nxv8i64_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i64_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i64_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ZEXT:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_ZEXT [[DEF]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: $v8m8 = COPY [[ZEXT]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %1:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %0:_(<vscale x 8 x s64>) = G_ZEXT %1(<vscale x 8 x s32>)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+...
>From 5b2b46bc9ec48a661b292a589dbc6accad64838c Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 15 Mar 2024 07:52:41 -0700
Subject: [PATCH 3/9] [RISCV][GISEL] Instruction selection for G_ZEXT, G_SEXT,
and G_ANYEXT with scalable vector types
---
.../instruction-select/rvv/anyext.mir | 704 ++++++++++++++++++
.../instruction-select/rvv/sext.mir | 340 +++++++++
.../instruction-select/rvv/zext.mir | 704 ++++++++++++++++++
3 files changed, 1748 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/anyext.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sext.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/zext.mir
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/anyext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/anyext.mir
new file mode 100644
index 00000000000000..0b99be67a8f0d1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/anyext.mir
@@ -0,0 +1,704 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name: anyext_nxv1i16_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i16_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i16_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s16>) = G_ANYEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv1i32_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_ANYEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv1i64_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv2i16_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i16_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i16_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s16>) = G_ANYEXT %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv2i32_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s32>) = G_ANYEXT %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv2i64_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s8>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: anyext_nxv4i16_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i16_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i16_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s16>) = G_ANYEXT %0(<vscale x 4 x s8>)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv4i32_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_ANYEXT %0(<vscale x 4 x s8>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: anyext_nxv4i64_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s8>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: anyext_nxv8i16_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i16_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i16_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s16>) = G_ANYEXT %0(<vscale x 8 x s8>)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: anyext_nxv8i32_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s32>) = G_ANYEXT %0(<vscale x 8 x s8>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: anyext_nxv8i64_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s8>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: anyext_nxv16i16_nxv16i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i16_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i16_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s16>) = G_ANYEXT %0(<vscale x 16 x s8>)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: anyext_nxv16i32_nxv16i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_ANYEXT %0(<vscale x 16 x s8>)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: anyext_nxv32i16_nxv32i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv32i16_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv32i16_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s16>) = G_ANYEXT %0(<vscale x 32 x s8>)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: anyext_nxv1i32_nxv1i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i32_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i32_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_ANYEXT %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv1i64_nxv1i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv2i32_nxv2i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i32_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i32_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s32>) = G_ANYEXT %0(<vscale x 2 x s16>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv2i64_nxv2i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s16>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: anyext_nxv4i32_nxv4i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i32_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i32_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_ANYEXT %0(<vscale x 4 x s16>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: anyext_nxv4i64_nxv4i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s16>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: anyext_nxv8i32_nxv8i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i32_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i32_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s32>) = G_ANYEXT %0(<vscale x 8 x s16>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: anyext_nxv8i64_nxv8i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s16>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: anyext_nxv16i32_nxv16i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv16i32_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv16i32_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_ANYEXT %0(<vscale x 16 x s16>)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: anyext_nxv1i64_nxv1i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv1i64_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv1i64_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_ANYEXT %0(<vscale x 1 x s32>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: anyext_nxv2i64_nxv2i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv2i64_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: anyext_nxv2i64_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_ANYEXT %0(<vscale x 2 x s32>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: anyext_nxv4i64_nxv4i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv4i64_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: anyext_nxv4i64_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_ANYEXT %0(<vscale x 4 x s32>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: anyext_nxv8i64_nxv8i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: anyext_nxv8i64_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: anyext_nxv8i64_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_ANYEXT %0(<vscale x 8 x s32>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sext.mir
new file mode 100644
index 00000000000000..8a7b926952beae
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/sext.mir
@@ -0,0 +1,340 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name: sext_nxv1i16_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s16>) = G_SEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv1i32_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_SEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv1i64_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv2i16_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s16>) = G_SEXT %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv2i32_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s32>) = G_SEXT %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv2i64_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s8>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: sext_nxv4i16_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s16>) = G_SEXT %0(<vscale x 4 x s8>)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv4i32_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_SEXT %0(<vscale x 4 x s8>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: sext_nxv4i64_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s8>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: sext_nxv8i16_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s16>) = G_SEXT %0(<vscale x 8 x s8>)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: sext_nxv8i32_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s32>) = G_SEXT %0(<vscale x 8 x s8>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: sext_nxv8i64_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s8>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: sext_nxv16i16_nxv16i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s16>) = G_SEXT %0(<vscale x 16 x s8>)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: sext_nxv16i32_nxv16i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_SEXT %0(<vscale x 16 x s8>)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: sext_nxv32i16_nxv32i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s16>) = G_SEXT %0(<vscale x 32 x s8>)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: sext_nxv1i32_nxv1i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_SEXT %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv1i64_nxv1i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv2i32_nxv2i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s32>) = G_SEXT %0(<vscale x 2 x s16>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv2i64_nxv2i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s16>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: sext_nxv4i32_nxv4i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_SEXT %0(<vscale x 4 x s16>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: sext_nxv4i64_nxv4i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s16>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: sext_nxv8i32_nxv8i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s32>) = G_SEXT %0(<vscale x 8 x s16>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: sext_nxv8i64_nxv8i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s16>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: sext_nxv16i32_nxv16i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_SEXT %0(<vscale x 16 x s16>)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: sext_nxv1i64_nxv1i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_SEXT %0(<vscale x 1 x s32>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: sext_nxv2i64_nxv2i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_SEXT %0(<vscale x 2 x s32>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: sext_nxv4i64_nxv4i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_SEXT %0(<vscale x 4 x s32>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: sext_nxv8i64_nxv8i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ %0:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_SEXT %0(<vscale x 8 x s32>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/zext.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/zext.mir
new file mode 100644
index 00000000000000..4d31650942ad92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/zext.mir
@@ -0,0 +1,704 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name: zext_nxv1i16_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i16_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i16_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s16>) = G_ZEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv1i32_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i32_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i32_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_ZEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv1i64_nxv1i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i64_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i64_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF8_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s8>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv2i16_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i16_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i16_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s16>) = G_ZEXT %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv2i32_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i32_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i32_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s32>) = G_ZEXT %0(<vscale x 2 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv2i64_nxv2i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i64_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i64_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF8_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s8>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: zext_nxv4i16_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i16_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i16_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s16>) = G_ZEXT %0(<vscale x 4 x s8>)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv4i32_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i32_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i32_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_ZEXT %0(<vscale x 4 x s8>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: zext_nxv4i64_nxv4i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i64_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i64_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF8_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s8>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: zext_nxv8i16_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i16_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i16_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s16>) = G_ZEXT %0(<vscale x 8 x s8>)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: zext_nxv8i32_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i32_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i32_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s32>) = G_ZEXT %0(<vscale x 8 x s8>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: zext_nxv8i64_nxv8i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i64_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i64_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF8_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s8>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: zext_nxv16i16_nxv16i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i16_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i16_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s16>) = G_ZEXT %0(<vscale x 16 x s8>)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: zext_nxv16i32_nxv16i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i32_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i32_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_ZEXT %0(<vscale x 16 x s8>)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: zext_nxv32i16_nxv32i8
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv32i16_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv32i16_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 4 /* e16 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s16>) = G_ZEXT %0(<vscale x 32 x s8>)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: zext_nxv1i32_nxv1i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i32_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i32_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_MF2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s32>) = G_ZEXT %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv1i64_nxv1i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i64_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i64_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF4_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s16>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv2i32_nxv2i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i32_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i32_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s32>) = G_ZEXT %0(<vscale x 2 x s16>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv2i64_nxv2i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i64_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i64_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF4_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s16>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: zext_nxv4i32_nxv4i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i32_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i32_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s32>) = G_ZEXT %0(<vscale x 4 x s16>)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: zext_nxv4i64_nxv4i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i64_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i64_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF4_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s16>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: zext_nxv8i32_nxv8i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i32_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i32_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s32>) = G_ZEXT %0(<vscale x 8 x s16>)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: zext_nxv8i64_nxv8i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i64_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i64_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF4_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s16>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: zext_nxv16i32_nxv16i16
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv16i32_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv16i32_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 5 /* e32 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s32>) = G_ZEXT %0(<vscale x 16 x s16>)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: zext_nxv1i64_nxv1i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv1i64_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: zext_nxv1i64_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s64>) = G_ZEXT %0(<vscale x 1 x s32>)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: zext_nxv2i64_nxv2i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv2i64_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m2 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: zext_nxv2i64_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm2 = PseudoVZEXT_VF2_M2 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m2 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s64>) = G_ZEXT %0(<vscale x 2 x s32>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: zext_nxv4i64_nxv4i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv4i64_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m4 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: zext_nxv4i64_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm4 = PseudoVZEXT_VF2_M4 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m4 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s64>) = G_ZEXT %0(<vscale x 4 x s32>)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: zext_nxv8i64_nxv8i32
+legalized: true
+regBankSelected: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: zext_nxv8i64_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV32I-NEXT: $v8m8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: zext_nxv8i64_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vrm8 = PseudoVZEXT_VF2_M8 [[DEF1]], [[DEF]], -1, 6 /* e64 */, 3 /* ta, ma */
+ ; RV64I-NEXT: $v8m8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s64>) = G_ZEXT %0(<vscale x 8 x s32>)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
>From 9757f78ba20c5f39f1adf88bf4f6f750f1defe22 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 15 Mar 2024 09:28:53 -0700
Subject: [PATCH 4/9] [RISCV][GISEL] Legalize G_SPLAT_VECTOR
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 98 ++-
.../Target/RISCV/GISel/RISCVLegalizerInfo.h | 1 +
llvm/lib/Target/RISCV/RISCVInstrGISel.td | 16 +
.../rvv/legalize-splatvector-rv32.mir | 734 ++++++++++++++++++
.../rvv/legalize-splatvector-rv64.mir | 566 ++++++++++++++
5 files changed, 1406 insertions(+), 9 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
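
A quick summary of the strategy in the patch below: splats of s1 (mask)
vectors cannot be selected directly, so they are custom legalized. A
constant all-ones or all-zeros splat becomes a G_VMSET_VL or G_VMCLR_VL,
and a non-constant splat is promoted through an equivalently-sized s8
splat that is compared against a zero splat. A minimal MIR sketch of the
non-constant case (the virtual register names and the nxv4 element count
are illustrative, not taken from the patch):

  ; given a non-constant mask bit %cond:_(s1)
  %one:_(s8) = G_CONSTANT i8 1
  %zero:_(s8) = G_CONSTANT i8 0
  ; promote the bit to s8 and mask it down to 0 or 1
  %ext:_(s8) = G_ZEXT %cond(s1)
  %bit:_(s8) = G_AND %ext, %one
  ; splat the 0/1 byte, then compare against a zero splat to form the mask
  %lhs:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %bit(s8)
  %rhs:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %zero(s8)
  %mask:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), %lhs, %rhs
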
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 9be7309dce7f56..f9055b789a87c2 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -414,6 +414,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, sXLen, sXLen)
.customFor({sXLen});
+ getActionDefinitionsBuilder(G_SPLAT_VECTOR)
+ .legalIf(
+ all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST), typeIs(1, sXLen)))
+ .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeInSet(1, {s1})));
+
getLegacyLegalizerInfo().computeTables();
}
@@ -593,15 +598,7 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);
-
- // The only custom legalization of extends we handle are vector extends.
- if (!DstTy.isVector() || !SrcTy.isVector())
- return false;
-
- // The only custom legalization of extends is from mask types
- if (SrcTy.getElementType().getSizeInBits() != 1)
- return false;
-
+
int64_t ExtTrueVal =
Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_ANYEXT ? 1 : -1;
LLT DstEltTy = DstTy.getElementType();
@@ -614,6 +611,87 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
return true;
}
+/// Return the mask type suitable for masking the provided vector type.
+/// This is simply an i1 element type vector of the same (possibly
+/// scalable) length.
+static LLT getMaskTypeFor(LLT VecTy) {
+ assert(VecTy.isVector());
+ ElementCount EC = VecTy.getElementCount();
+ return LLT::vector(EC, LLT::scalar(1));
+}
+
+/// Creates an all ones mask suitable for masking a vector of type VecTy with
+/// vector length VL.
+static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
+ MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI) {
+ LLT MaskTy = getMaskTypeFor(VecTy);
+ return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
+}
+
+/// Gets the two common "VL" operands: an all-ones mask and the vector length.
+/// VecTy is a scalable vector type.
+static std::pair<MachineInstrBuilder, Register>
+buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
+ MachineRegisterInfo &MRI) {
+ LLT VecTy = Dst.getLLTTy(MRI);
+ assert(VecTy.isScalableVector() && "Expecting scalable container type");
+ Register VL(RISCV::X0);
+ MachineInstrBuilder Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
+ return {Mask, VL};
+}
+
+// Lower splats of s1 types to G_ICMP. For each mask vector type, we have a
+// legal equivalently-sized i8 type, so we can use that as a go-between.
+// Splats of s1 types with a constant value can be legalized as VMSET_VL or
+// VMCLR_VL.
+bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
+ MachineIRBuilder &MIB) const {
+ assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);
+
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+
+ Register Dst = MI.getOperand(0).getReg();
+ Register SplatVal = MI.getOperand(1).getReg();
+
+ LLT VecTy = MRI.getType(Dst);
+ LLT SplatTy = MRI.getType(SplatVal);
+
+  // We should not see splats of types larger than s1, since those are legal
+  // and do not need custom legalization.
+ if (SplatTy.getSizeInBits() != 1)
+ return false;
+
+ // All-zeros or all-ones splats are handled specially.
+ MachineInstr &SplatValMI = *MRI.getVRegDef(SplatVal);
+ if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
+ auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
+ MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
+ MI.eraseFromParent();
+ return true;
+ }
+ if (isNullOrNullSplat(SplatValMI, MRI)) {
+ auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
+ MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
+ MI.eraseFromParent();
+ return true;
+ }
+
+ // Handle non-constant mask splat (i.e. not sure if it's all zeros or all
+ // ones) by promoting it to an s8 splat.
+ LLT InterEltTy = LLT::scalar(8);
+ LLT InterTy = VecTy.changeElementType(InterEltTy);
+ auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
+ auto And =
+ MIB.buildAnd(InterEltTy, ZExtSplatVal, MIB.buildConstant(InterEltTy, 1));
+ auto LHS = MIB.buildSplatVector(InterTy, And);
+ auto ZeroSplat =
+ MIB.buildSplatVector(InterTy, MIB.buildConstant(InterEltTy, 0));
+ MIB.buildICmp(CmpInst::Predicate::ICMP_NE, Dst, LHS, ZeroSplat);
+ MI.eraseFromParent();
+ return true;
+}
+
bool RISCVLegalizerInfo::legalizeCustom(
LegalizerHelper &Helper, MachineInstr &MI,
LostDebugLocObserver &LocObserver) const {
@@ -677,6 +755,8 @@ bool RISCVLegalizerInfo::legalizeCustom(
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ANYEXT:
return legalizeExt(MI, MIRBuilder);
+ case TargetOpcode::G_SPLAT_VECTOR:
+ return legalizeSplatVector(MI, MIRBuilder);
}
llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index a31bc482c69337..5bb1e7a7282788 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -44,6 +44,7 @@ class RISCVLegalizerInfo : public LegalizerInfo {
bool legalizeVAStart(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
+ bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index 54e22d6257814a..267dc592cc7af7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -32,3 +32,19 @@ def G_READ_VLENB : RISCVGenericInstruction {
let hasSideEffects = false;
}
def : GINodeEquiv<G_READ_VLENB, riscv_read_vlenb>;
+
+// Pseudo equivalent to a RISCVISD::VMCLR_VL
+def G_VMCLR_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMCLR_VL, riscv_vmclr_vl>;
+
+// Pseudo equivalent to a RISCVISD::VMSET_VL
+def G_VMSET_VL : RISCVGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$vl);
+ let hasSideEffects = false;
+}
+def : GINodeEquiv<G_VMSET_VL, riscv_vmset_vl>;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
new file mode 100644
index 00000000000000..bb791f60c7a1a5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv32.mir
@@ -0,0 +1,734 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: splatvector_nxv1i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv1i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 1 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv2i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv2i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 2 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv4i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 4 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv4i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 4 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv8i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 8 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv8i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 8 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv16i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 16 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv16i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 16 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv32i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv32i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv32i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 32 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv32i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 32 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv64i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv64i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv64i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 64 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv64i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 64 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s32) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s32)
+ %2:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+...
+
+---
+name: splatvector_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+
+---
+name: splatvector_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i8
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[TRUNC]](s16)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[TRUNC]](s16)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[TRUNC]](s16)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[TRUNC]](s16)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i16
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[TRUNC]](s16)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv4i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv8i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv16i32
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
new file mode 100644
index 00000000000000..430883f7b0a106
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-splatvector-rv64.mir
@@ -0,0 +1,566 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: splatvector_nxv1i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv1i1_1
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMSET_VL1:%[0-9]+]]:_(<vscale x 1 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMSET_VL1]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv1i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+  bb.1:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: splatvector_nxv1i1_2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+ ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AND]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND1]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[TRUNC]](s8)
+ ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C2]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR1:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[TRUNC1]](s8)
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(ne), [[SPLAT_VECTOR]](<vscale x 1 x s8>), [[SPLAT_VECTOR1]]
+ ; CHECK-NEXT: $v0 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 1 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv2i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splatvector_nxv2i1_0
+ ; CHECK: [[VMSET_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMSET_VL $x0
+ ; CHECK-NEXT: [[VMCLR_VL:%[0-9]+]]:_(<vscale x 2 x s1>) = G_VMCLR_VL $x0
+ ; CHECK-NEXT: $v0 = COPY [[VMCLR_VL]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v0
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv2i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv4i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv4i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 4 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv8i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv8i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 8 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv16i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv16i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 16 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv32i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv32i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 32 x s1>)
+ PseudoRET implicit $v0
+...
+---
+name: splatvector_nxv64i1_0
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 0
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_1
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(s1) = G_CONSTANT i1 1
+ %1:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %0(s1)
+ $v0 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+
+...
+---
+name: splatvector_nxv64i1_2
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x10
+
+ %0:_(s64) = COPY $x10
+ %1:_(s1) = G_TRUNC %0(s64)
+ %2:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR %1(s1)
+ $v0 = COPY %2(<vscale x 64 x s1>)
+ PseudoRET implicit $v0
+...
+
+---
+name: splatvector_nxv1i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+
+---
+name: splatvector_nxv2i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8 = COPY %2(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv16i8
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(s8) = G_CONSTANT i8 0
+ %2:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %1(s8)
+ $v8m2 = COPY %2(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv1i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8 = COPY %2(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv8i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m2 = COPY %2(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv16i16
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(s16) = G_CONSTANT i16 0
+ %2:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %1(s16)
+ $v8m4 = COPY %2(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv1i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv4i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m2 = COPY %2(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv8i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m4 = COPY %2(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv16i32
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %1(s32)
+ $v8m8 = COPY %2(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: splatvector_nxv1i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8 = COPY %2(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splatvector_nxv2i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splatvector_nxv4i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m4 = COPY %2(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splatvector_nxv8i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m8 = COPY %2(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: splatvector_nxv16i64
+legalized: false
+tracksRegLiveness: true
+body: |
+ bb.1:
+ %0:_(<vscale x 16 x s64>) = G_IMPLICIT_DEF
+ %1:_(s64) = G_CONSTANT i64 0
+ %2:_(<vscale x 16 x s64>) = G_SPLAT_VECTOR %1(s64)
+ %3:_(<vscale x 8 x s64>), %4:_(<vscale x 8 x s64>) = G_UNMERGE_VALUES %2(<vscale x 16 x s64>)
+ $v8m8 = COPY %3(<vscale x 8 x s64>)
+ $v16m8 = COPY %4(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8, implicit $v16m8
+
+...
+
>From c35e99ae6ad6bfd04050571970b56074c880ddae Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 7 Mar 2024 10:40:08 -0800
Subject: [PATCH 5/9] [RISCV][GISEL] Legalize G_ICMP for scalable vector types
The result of a vector G_ICMP is a vector with an s1 element type. The
input vectors can have integer, floating-point, or boolean element types.
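
As a concrete illustration of the new legality rule (this mirrors the
autogenerated tests added below; the virtual register names are
illustrative):

  ; already legal: s1-element result, legal s8-element inputs
  %a:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
  %m:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %a, %a
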
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 3 +
.../legalizer/rvv/legalize-icmp.mir | 468 ++++++++++++++++++
llvm/test/MachineVerifier/test_g_fcmp.mir | 7 +-
llvm/test/MachineVerifier/test_g_icmp.mir | 7 +-
4 files changed, 483 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index f9055b789a87c2..241f6ad6c692a6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -231,6 +231,9 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
getActionDefinitionsBuilder(G_ICMP)
.legalFor({{sXLen, sXLen}, {sXLen, p0}})
+ .widenScalarOrEltToNextPow2OrMinSize(1, 8)
+ .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
.widenScalarToNextPow2(1)
.clampScalar(1, sXLen, sXLen)
.clampScalar(0, sXLen, sXLen);
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
new file mode 100644
index 00000000000000..dd14786ec30c22
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-icmp.mir
@@ -0,0 +1,468 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name: icmp_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv1i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s1>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv2i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s1>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv4i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s1>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv8i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s1>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv16i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s1>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv32i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s1>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv64i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv64i1
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s1>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv1i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv2i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv4i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv8i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv16i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv32i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv64i8
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv1i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv2i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv4i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv8i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv16i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv32i16
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv1i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv2i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv4i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv8i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv16i32
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv1i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv2i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv4i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+...
+---
+name: icmp_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: icmp_nxv8i64
+ ; CHECK: [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
+ ; CHECK-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0, %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+...
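The tests above all pass through the legalizer unchanged: a bool-vector
result compared from legal int/FP-vector (or bool-vector) sources is
already legal. A minimal sketch of the legality rule, reusing this patch's
helper predicates; the exact combinator nesting is an assumption, not the
committed code:

  // Sketch only: mark vector G_ICMP legal when the destination is a legal
  // mask (bool) vector and the sources are legal int/FP or mask vectors.
  getActionDefinitionsBuilder(G_ICMP)
      .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                   any(typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST),
                       typeIsLegalBoolVec(1, BoolVecTys, ST))));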
diff --git a/llvm/test/MachineVerifier/test_g_fcmp.mir b/llvm/test/MachineVerifier/test_g_fcmp.mir
index 9a73569cb4abd0..2052acdd99ebb6 100644
--- a/llvm/test/MachineVerifier/test_g_fcmp.mir
+++ b/llvm/test/MachineVerifier/test_g_fcmp.mir
@@ -24,12 +24,17 @@ body: |
%4:_(<2 x s32>) = G_IMPLICIT_DEF
%5:_(s1) = G_FCMP floatpred(oeq), %3, %4
- ; mismatched element count
+ ; mismatched fixed element count
; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
%6:_(<2 x s32>) = G_IMPLICIT_DEF
%7:_(<2 x s32>) = G_IMPLICIT_DEF
%8:_(<4 x s1>) = G_FCMP floatpred(oeq), %6, %7
+ ; mismatched scalable element count
+ ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
+  %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  %11:_(<vscale x 4 x s1>) = G_FCMP floatpred(oeq), %9, %10
; mismatched scalar element type
; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
diff --git a/llvm/test/MachineVerifier/test_g_icmp.mir b/llvm/test/MachineVerifier/test_g_icmp.mir
index 7c64e25b225246..23960a49dbb1ad 100644
--- a/llvm/test/MachineVerifier/test_g_icmp.mir
+++ b/llvm/test/MachineVerifier/test_g_icmp.mir
@@ -24,12 +24,17 @@ body: |
%4:_(<2 x s32>) = G_IMPLICIT_DEF
%5:_(s1) = G_ICMP intpred(eq), %3, %4
- ; mismatched element count
+ ; mismatched fixed element count
; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
%6:_(<2 x s32>) = G_IMPLICIT_DEF
%7:_(<2 x s32>) = G_IMPLICIT_DEF
%8:_(<4 x s1>) = G_ICMP intpred(eq), %6, %7
+ ; mismatched scalable element count
+ ; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
+  %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  %11:_(<vscale x 4 x s1>) = G_ICMP intpred(eq), %9, %10
; mismatched scalar element type
; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
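Both verifier tests now cover the scalable case as well. A minimal sketch
of the check they exercise, assuming the verifier compares ElementCount
(which distinguishes fixed from scalable counts) rather than a raw element
number:

  // Sketch: dst and src of a vector icmp/fcmp must agree on vector-ness
  // and on element count, including the scalable flag.
  LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
  LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
  if (DstTy.isVector() != SrcTy.isVector() ||
      (DstTy.isVector() &&
       DstTy.getElementCount() != SrcTy.getElementCount()))
    report("Generic vector icmp/fcmp must preserve number of elements", MI);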
>From de042c4a98817235fa050fbdc11e357d7abf3c93 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 14 Mar 2024 07:20:23 -0700
Subject: [PATCH 6/9] [RISCV][GISEL] Regbank select for scalable vector G_ICMP
---
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 11 +
.../GlobalISel/regbankselect/rvv/icmp.mir | 675 ++++++++++++++++++
2 files changed, 686 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index aae4e37cf08f66..3201a89f526656 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -488,6 +488,17 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[1] = GPRValueMapping;
break;
}
+ case TargetOpcode::G_ICMP: {
+ if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+ LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+ LLT SrcTy = MRI.getType(MI.getOperand(2).getReg());
+ OpdsMapping[0] =
+ getVRBValueMapping(DstTy.getSizeInBits().getKnownMinValue());
+ OpdsMapping[2] = OpdsMapping[3] =
+ getVRBValueMapping(SrcTy.getSizeInBits().getKnownMinValue());
+ }
+ break;
+ }
case TargetOpcode::G_FCMP: {
LLT Ty = MRI.getType(MI.getOperand(2).getReg());
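The G_ICMP mapping above keys the vector register bank on
TypeSize::getKnownMinValue() because a scalable LLT has no fixed size at
compile time; only the minimum multiple of vscale is known. A small
illustration (values shown are for <vscale x 4 x s32>):

  LLT Ty = LLT::scalable_vector(4, 32);      // <vscale x 4 x s32>
  TypeSize TS = Ty.getSizeInBits();          // scalable, minimum 128 bits
  unsigned MinBits = TS.getKnownMinValue();  // 128, used to pick the mapping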
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir
new file mode 100644
index 00000000000000..925d6aee47490c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/icmp.mir
@@ -0,0 +1,675 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+---
+name: icmp_nxv1i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s1>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s1>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s1>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s1>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s1>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s1>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s1>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s1>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s1>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s1>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s1>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s1>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv16i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv16i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s1>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv16i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s1>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s1>), %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv32i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv32i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s1>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv32i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s1>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 32 x s1>), %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv64i1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv64i1
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s1>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv64i1
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s1>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 64 x s1>), %0
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s8>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s8>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s8>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s8>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s8>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s8>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s8>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s8>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s8>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s8>), %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s8>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 32 x s8>), %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 64 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 64 x s8>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 64 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 64 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 64 x s8>), %0
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s16>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s16>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s16>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s16>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s16>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s16>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s16>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s16>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s16>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s16>), %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 32 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 32 x s16>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 32 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 32 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 32 x s16>), %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s32>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s32>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s32>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s32>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s32>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s32>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s32>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s32>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 16 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 16 x s32>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 16 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 16 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 16 x s32>), %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 1 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 1 x s64>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 1 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 1 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 1 x s64>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 2 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 2 x s64>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 2 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 2 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 2 x s64>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 4 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 4 x s64>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 4 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 4 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 4 x s64>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV32I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
+ ; RV32I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ ; RV64I-NEXT: [[ICMP:%[0-9]+]]:vrb(<vscale x 8 x s1>) = G_ICMP intpred(sgt), [[DEF]](<vscale x 8 x s64>), [[DEF]]
+ ; RV64I-NEXT: $v8 = COPY [[ICMP]](<vscale x 8 x s1>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:_(<vscale x 8 x s1>) = G_ICMP intpred(sgt), %0(<vscale x 8 x s64>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
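Patch 7 below selects these compares into unmasked RVV mask-producing
pseudos. A hypothetical helper, for illustration only, showing the
predicate-to-pseudo correspondence at LMUL=1 (the in-tree selector also
derives the MF8..M8 variant from the operand type):

  static unsigned selectVVICmpSketch(CmpInst::Predicate Pred) {
    switch (Pred) {
    case CmpInst::ICMP_EQ:  return RISCV::PseudoVMSEQ_VV_M1;
    case CmpInst::ICMP_SLT: return RISCV::PseudoVMSLT_VV_M1;  // signed <
    case CmpInst::ICMP_ULT: return RISCV::PseudoVMSLTU_VV_M1; // unsigned <, used below
    default: llvm_unreachable("predicate not covered by this sketch");
    }
  }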
>From deb225939f7a586d0d8c0abecb3d45fd5c652fbc Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 15 Mar 2024 10:00:22 -0700
Subject: [PATCH 7/9] [RISCV][GISEL] Instruction selection for G_ICMP
---
.../instruction-select/rvv/icmp.mir | 534 ++++++++++++++++++
1 file changed, 534 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/icmp.mir
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/icmp.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/icmp.mir
new file mode 100644
index 00000000000000..05cc64948f2aaa
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/icmp.mir
@@ -0,0 +1,534 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+
+# Don't test i1 element types here since they have been widened to i8 in legalization
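+# In the expected output, the trailing operands of each pseudo are AVL and
+# SEW: an AVL of -1 requests VLMAX, and SEW is encoded as log2 of the
+# element width (3 /* e8 */ through 6 /* e64 */). Compares at LMUL > 1 mark
+# the mask result early-clobber so it is not allocated inside the source
+# register group.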
+
+---
+name: icmp_nxv1i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_MF8_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF8 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF8_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_MF8_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF8 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF8_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s1>) = G_ICMP intpred(ult), %0(<vscale x 1 x s8>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_MF4_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF4 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_MF4_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF4 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s1>) = G_ICMP intpred(ult), %0(<vscale x 2 x s8>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_MF2_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF2 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_MF2_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF2 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s1>) = G_ICMP intpred(ult), %0(<vscale x 4 x s8>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i8
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i8
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s1>) = G_ICMP intpred(ult), %0(<vscale x 8 x s8>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv16i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv16i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv16i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s1>) = G_ICMP intpred(ult), %0(<vscale x 16 x s8>), %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv32i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv32i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv32i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s1>) = G_ICMP intpred(ult), %0(<vscale x 32 x s8>), %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv64i8
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv64i8
+ ; RV32I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv64i8
+ ; RV64I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 3 /* e8 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 64 x s1>) = G_ICMP intpred(ult), %0(<vscale x 64 x s8>), %0
+ $v8 = COPY %1(<vscale x 64 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv1i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_MF4_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF4 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF4_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_MF4_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF4 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF4_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s1>) = G_ICMP intpred(ult), %0(<vscale x 1 x s16>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_MF2_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF2 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_MF2_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF2 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s1>) = G_ICMP intpred(ult), %0(<vscale x 2 x s16>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i16
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i16
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s1>) = G_ICMP intpred(ult), %0(<vscale x 4 x s16>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s1>) = G_ICMP intpred(ult), %0(<vscale x 8 x s16>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv16i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv16i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv16i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s1>) = G_ICMP intpred(ult), %0(<vscale x 16 x s16>), %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv32i16
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv32i16
+ ; RV32I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv32i16
+ ; RV64I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 4 /* e16 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 32 x s1>) = G_ICMP intpred(ult), %0(<vscale x 32 x s16>), %0
+ $v8 = COPY %1(<vscale x 32 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv1i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_MF2_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF2 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF2_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_MF2_:%[0-9]+]]:vr = PseudoVMSLTU_VV_MF2 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_MF2_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s1>) = G_ICMP intpred(ult), %0(<vscale x 1 x s32>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i32
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i32
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s1>) = G_ICMP intpred(ult), %0(<vscale x 2 x s32>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s1>) = G_ICMP intpred(ult), %0(<vscale x 4 x s32>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s1>) = G_ICMP intpred(ult), %0(<vscale x 8 x s32>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv16i32
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv16i32
+ ; RV32I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv16i32
+ ; RV64I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 5 /* e32 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 16 x s1>) = G_ICMP intpred(ult), %0(<vscale x 16 x s32>), %0
+ $v8 = COPY %1(<vscale x 16 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv1i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv1i64
+ ; RV32I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV32I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv1i64
+ ; RV64I: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; RV64I-NEXT: [[PseudoVMSLTU_VV_M1_:%[0-9]+]]:vr = PseudoVMSLTU_VV_M1 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8 = COPY [[PseudoVMSLTU_VV_M1_]]
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 1 x s1>) = G_ICMP intpred(ult), %0(<vscale x 1 x s64>), %0
+ $v8 = COPY %1(<vscale x 1 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv2i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv2i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv2i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrm2 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M2 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 2 x s1>) = G_ICMP intpred(ult), %0(<vscale x 2 x s64>), %0
+ $v8 = COPY %1(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv4i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv4i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv4i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrm4 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M4 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 4 x s1>) = G_ICMP intpred(ult), %0(<vscale x 4 x s64>), %0
+ $v8 = COPY %1(<vscale x 4 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: icmp_nxv8i64
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ ; RV32I-LABEL: name: icmp_nxv8i64
+ ; RV32I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV32I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV32I-NEXT: $v8 = COPY %1
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: icmp_nxv8i64
+ ; RV64I: [[DEF:%[0-9]+]]:vrm8 = IMPLICIT_DEF
+ ; RV64I-NEXT: early-clobber %1:vr = PseudoVMSLTU_VV_M8 [[DEF]], [[DEF]], -1, 6 /* e64 */
+ ; RV64I-NEXT: $v8 = COPY %1
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:vrb(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+ %1:vrb(<vscale x 8 x s1>) = G_ICMP intpred(ult), %0(<vscale x 8 x s64>), %0
+ $v8 = COPY %1(<vscale x 8 x s1>)
+ PseudoRET implicit $v8
+
+...
>From e74182a96a95f4e074ddcb946bec068b8c5bbaec Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 25 Mar 2024 11:01:41 -0700
Subject: [PATCH 8/9] remove unused variable
---
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 2 --
1 file changed, 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 241f6ad6c692a6..23c7df599c1818 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -600,8 +600,6 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
Register Src = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
- LLT SrcTy = MRI.getType(Src);
-
int64_t ExtTrueVal =
Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_ANYEXT ? 1 : -1;
LLT DstEltTy = DstTy.getElementType();
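For context on what consumes ExtTrueVal after this cleanup, a minimal sketch of the tail of legalizeExt, assuming it follows the vselect strategy outlined for mask-vector extensions (illustrative, not the verbatim patch code):

  // Splat the false value (0) and the true value (1 or -1) to the
  // destination vector type, then select between them under the mask.
  auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
  auto SplatTrue =
      MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
  MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);

  MI.eraseFromParent();
  return true;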
>From 74d9218b8dcfd69f0693c4b00034fd760007c7ae Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 25 Mar 2024 11:39:49 -0700
Subject: [PATCH 9/9] [FIXES]
---
.../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 19 ++++++++++++-------
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 14 ++++++++++++--
llvm/test/MachineVerifier/test_g_fcmp.mir | 12 ++++++------
llvm/test/MachineVerifier/test_g_icmp.mir | 12 ++++++------
4 files changed, 36 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 23c7df599c1818..a63ca1c9e1b44e 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -139,20 +139,25 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
.clampScalar(0, s32, sXLen)
.minScalarSameAs(1, 0);
- auto &ExtActions =
- getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
- .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
- .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
- typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
- .maxScalar(0, sXLen);
if (ST.is64Bit()) {
- ExtActions.legalFor({{sXLen, s32}});
+ getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
+ .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
+ .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
+ .legalFor({{sXLen, s32}})
+ .maxScalar(0, sXLen);
getActionDefinitionsBuilder(G_SEXT_INREG)
.customFor({sXLen})
.maxScalar(0, sXLen)
.lower();
} else {
+ getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
+ .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
+ .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
+ .maxScalar(0, sXLen);
+
getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower();
}
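On the builder change above: LegalizeRuleSet rules are matched in the order they are added, so the earlier refactor, which appended legalFor({{sXLen, s32}}) to a shared builder after the common chain already ended in maxScalar, did not produce the same rule order as listing legalFor before maxScalar. Duplicating the chain in each subtarget branch restores the original ordering, at the cost of repeating the two vector clauses; that, presumably, is what this fixup addresses.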
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 3201a89f526656..c09413d60ddaaf 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -290,7 +290,16 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
switch (Opc) {
case TargetOpcode::G_ADD:
- case TargetOpcode::G_SUB:
+ case TargetOpcode::G_SUB: {
+ if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ return getInstructionMapping(
+ DefaultMappingID, /*Cost=*/1,
+ getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue()),
+ NumOperands);
+ }
+ }
+ LLVM_FALLTHROUGH;
case TargetOpcode::G_ANYEXT:
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ZEXT: {
@@ -496,9 +505,10 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
getVRBValueMapping(DstTy.getSizeInBits().getKnownMinValue());
OpdsMapping[2] = OpdsMapping[3] =
getVRBValueMapping(SrcTy.getSizeInBits().getKnownMinValue());
+ break;
}
- break;
}
+ LLVM_FALLTHROUGH;
case TargetOpcode::G_FCMP: {
LLT Ty = MRI.getType(MI.getOperand(2).getReg());
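Since fallthrough edits are hard to read as raw hunks, here is the reconstructed shape of the switch after the two RISCVRegisterBankInfo.cpp changes above (an abbreviated sketch of the post-patch control flow, not verbatim):

  switch (Opc) {
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB: {
    // Vector add/sub map straight onto the vector register bank.
    if (MRI.getType(MI.getOperand(0).getReg()).isVector()) {
      LLT Ty = MRI.getType(MI.getOperand(0).getReg());
      return getInstructionMapping(
          DefaultMappingID, /*Cost=*/1,
          getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue()),
          NumOperands);
    }
  }
    LLVM_FALLTHROUGH; // Scalar add/sub share the scalar mapping below.
  case TargetOpcode::G_ANYEXT:
    ...
  case TargetOpcode::G_ICMP: {
    if (Ty.isVector()) {
      // Mask result and both sources live on the vector bank.
      ...
      break; // Vector icmp is fully handled here.
    }
  }
    LLVM_FALLTHROUGH; // Scalar icmp continues into the G_FCMP case.
  case TargetOpcode::G_FCMP: {
    ...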
diff --git a/llvm/test/MachineVerifier/test_g_fcmp.mir b/llvm/test/MachineVerifier/test_g_fcmp.mir
index 2052acdd99ebb6..17be746a63ff4f 100644
--- a/llvm/test/MachineVerifier/test_g_fcmp.mir
+++ b/llvm/test/MachineVerifier/test_g_fcmp.mir
@@ -32,14 +32,14 @@ body: |
; mismatched scalable element count
; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
- %6:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- %7:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- %8:_(<vscale x 4 x s1>) = G_FCMP floatpred(oeq), %6, %7
+ %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %11:_(<vscale x 4 x s1>) = G_FCMP floatpred(oeq), %9, %10
; mismatched scalar element type
; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
- %9:_(s32) = G_FCONSTANT float 0.0
- %10:_(s64) = G_FCONSTANT float 1.0
- %11:_(s1) = G_FCMP floatpred(oeq), %9, %10
+ %12:_(s32) = G_FCONSTANT float 0.0
+ %13:_(s64) = G_FCONSTANT float 1.0
+ %14:_(s1) = G_FCMP floatpred(oeq), %12, %13
...
diff --git a/llvm/test/MachineVerifier/test_g_icmp.mir b/llvm/test/MachineVerifier/test_g_icmp.mir
index 23960a49dbb1ad..74e3d34ebb5767 100644
--- a/llvm/test/MachineVerifier/test_g_icmp.mir
+++ b/llvm/test/MachineVerifier/test_g_icmp.mir
@@ -32,14 +32,14 @@ body: |
; mismatched scalable element count
; CHECK: Bad machine code: Generic vector icmp/fcmp must preserve number of
- %6:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- %7:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
- %8:_(<vscale x 4 x s1>) = G_ICMP intpred(eq), %6, %7
+ %9:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %10:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+ %11:_(<vscale x 4 x s1>) = G_ICMP intpred(eq), %9, %10
; mismatched scalar element type
; CHECK: *** Bad machine code: Type mismatch in generic instruction ***
- %9:_(s32) = G_CONSTANT i32 0
- %10:_(s64) = G_CONSTANT i32 1
- %11:_(s1) = G_ICMP intpred(eq), %9, %10
+ %12:_(s32) = G_CONSTANT i32 0
+ %13:_(s64) = G_CONSTANT i32 1
+ %14:_(s1) = G_ICMP intpred(eq), %12, %13
...
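The register renumbering in the two MachineVerifier tests (%6 through %11 becoming %9 through %14) carries no functional change on its own; it most likely makes room for registers that an earlier patch in this series introduced above these hunks, keeping the virtual register numbering in each file sequential.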