[llvm] 367c145 - [IRTranslator][RISCV] Support scalable vector zeroinitializer. (#108666)
via llvm-commits
llvm-commits at lists.llvm.org
Sat Sep 14 15:46:21 PDT 2024
Author: Craig Topper
Date: 2024-09-14T15:46:18-07:00
New Revision: 367c145e5fcfa36becca183e6ab9e7688afadc26
URL: https://github.com/llvm/llvm-project/commit/367c145e5fcfa36becca183e6ab9e7688afadc26
DIFF: https://github.com/llvm/llvm-project/commit/367c145e5fcfa36becca183e6ab9e7688afadc26.diff
LOG: [IRTranslator][RISCV] Support scalable vector zeroinitializer. (#108666)
Added:
llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll
Modified:
llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 7fbefd05ae67c7..6496a39415ac6c 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3528,11 +3528,13 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
} else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
- if (!isa<FixedVectorType>(CAZ->getType()))
- return false;
+ Constant &Elt = *CAZ->getElementValue(0u);
+ if (isa<ScalableVectorType>(CAZ->getType())) {
+ EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
+ return true;
+ }
// Return the scalar if it is a <1 x Ty> vector.
unsigned NumElts = CAZ->getElementCount().getFixedValue();
- Constant &Elt = *CAZ->getElementValue(0u);
if (NumElts == 1)
return translateCopy(C, Elt, *EntryBuilder);
// All elements are zero so we can just use the first one.
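For illustration, a scalable-vector zeroinitializer such as

    define <vscale x 4 x i32> @splat_zero_nxv4i32() {
      ret <vscale x 4 x i32> zeroinitializer
    }

is now translated by the IRTranslator into a splat of the scalar zero element
instead of falling back (register numbering here is illustrative; the exact
expected output is in the autogenerated checks below):

    %0:_(s32) = G_CONSTANT i32 0
    %1:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %0(s32)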
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll
new file mode 100644
index 00000000000000..6a1c3ca2b0b674
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll
@@ -0,0 +1,821 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
+; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i1> @splat_zero_nxv1i1() {
+ ; RV32-LABEL: name: splat_zero_nxv1i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 1 x i1> zeroinitializer
+}
+
+define <vscale x 2 x i1> @splat_zero_nxv2i1() {
+ ; RV32-LABEL: name: splat_zero_nxv2i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 2 x i1> zeroinitializer
+}
+
+define <vscale x 4 x i1> @splat_zero_nxv4i1() {
+ ; RV32-LABEL: name: splat_zero_nxv4i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 4 x i1> zeroinitializer
+}
+
+define <vscale x 8 x i1> @splat_zero_nxv8i1() {
+ ; RV32-LABEL: name: splat_zero_nxv8i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 8 x i1> zeroinitializer
+}
+
+define <vscale x 16 x i1> @splat_zero_nxv16i1() {
+ ; RV32-LABEL: name: splat_zero_nxv16i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv16i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 16 x i1> zeroinitializer
+}
+
+define <vscale x 32 x i1> @splat_zero_nxv32i1() {
+ ; RV32-LABEL: name: splat_zero_nxv32i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv32i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 32 x i1> zeroinitializer
+}
+
+define <vscale x 64 x i1> @splat_zero_nxv64i1() {
+ ; RV32-LABEL: name: splat_zero_nxv64i1
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV32-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv64i1
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+ ; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
+ ret <vscale x 64 x i1> zeroinitializer
+}
+
+define <vscale x 1 x i8> @splat_zero_nxv1i8() {
+ ; RV32-LABEL: name: splat_zero_nxv1i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x i8> zeroinitializer
+}
+
+define <vscale x 2 x i8> @splat_zero_nxv2i8() {
+ ; RV32-LABEL: name: splat_zero_nxv2i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 2 x i8> zeroinitializer
+}
+
+define <vscale x 4 x i8> @splat_zero_nxv4i8() {
+ ; RV32-LABEL: name: splat_zero_nxv4i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 4 x i8> zeroinitializer
+}
+
+define <vscale x 8 x i8> @splat_zero_nxv8i8() {
+ ; RV32-LABEL: name: splat_zero_nxv8i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 8 x i8> zeroinitializer
+}
+
+define <vscale x 16 x i8> @splat_zero_nxv16i8() {
+ ; RV32-LABEL: name: splat_zero_nxv16i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv16i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 16 x i8> zeroinitializer
+}
+
+define <vscale x 32 x i8> @splat_zero_nxv32i8() {
+ ; RV32-LABEL: name: splat_zero_nxv32i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv32i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 32 x i8> zeroinitializer
+}
+
+define <vscale x 64 x i8> @splat_zero_nxv64i8() {
+ ; RV32-LABEL: name: splat_zero_nxv64i8
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv64i8
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 64 x i8> zeroinitializer
+}
+
+define <vscale x 1 x i16> @splat_zero_nxv1i16() {
+ ; RV32-LABEL: name: splat_zero_nxv1i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x i16> zeroinitializer
+}
+
+define <vscale x 2 x i16> @splat_zero_nxv2i16() {
+ ; RV32-LABEL: name: splat_zero_nxv2i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 2 x i16> zeroinitializer
+}
+
+define <vscale x 4 x i16> @splat_zero_nxv4i16() {
+ ; RV32-LABEL: name: splat_zero_nxv4i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 4 x i16> zeroinitializer
+}
+
+define <vscale x 8 x i16> @splat_zero_nxv8i16() {
+ ; RV32-LABEL: name: splat_zero_nxv8i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 8 x i16> zeroinitializer
+}
+
+define <vscale x 16 x i16> @splat_zero_nxv16i16() {
+ ; RV32-LABEL: name: splat_zero_nxv16i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv16i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 16 x i16> zeroinitializer
+}
+
+define <vscale x 32 x i16> @splat_zero_nxv32i16() {
+ ; RV32-LABEL: name: splat_zero_nxv32i16
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv32i16
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 32 x i16> zeroinitializer
+}
+
+define <vscale x 1 x i32> @splat_zero_nxv1i32() {
+ ; RV32-LABEL: name: splat_zero_nxv1i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x i32> zeroinitializer
+}
+
+define <vscale x 2 x i32> @splat_zero_nxv2i32() {
+ ; RV32-LABEL: name: splat_zero_nxv2i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 2 x i32> zeroinitializer
+}
+
+define <vscale x 4 x i32> @splat_zero_nxv4i32() {
+ ; RV32-LABEL: name: splat_zero_nxv4i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 4 x i32> zeroinitializer
+}
+
+define <vscale x 8 x i32> @splat_zero_nxv8i32() {
+ ; RV32-LABEL: name: splat_zero_nxv8i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 8 x i32> zeroinitializer
+}
+
+define <vscale x 16 x i32> @splat_zero_nxv16i32() {
+ ; RV32-LABEL: name: splat_zero_nxv16i32
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv16i32
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 16 x i32> zeroinitializer
+}
+
+define <vscale x 1 x i64> @splat_zero_nxv1i64() {
+ ; RV32-LABEL: name: splat_zero_nxv1i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x i64> zeroinitializer
+}
+
+define <vscale x 2 x i64> @splat_zero_nxv2i64() {
+ ; RV32-LABEL: name: splat_zero_nxv2i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 2 x i64> zeroinitializer
+}
+
+define <vscale x 4 x i64> @splat_zero_nxv4i64() {
+ ; RV32-LABEL: name: splat_zero_nxv4i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 4 x i64> zeroinitializer
+}
+
+define <vscale x 8 x i64> @splat_zero_nxv8i64() {
+ ; RV32-LABEL: name: splat_zero_nxv8i64
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8i64
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 8 x i64> zeroinitializer
+}
+
+define <vscale x 1 x half> @splat_zero_nxv1half() {
+ ; RV32-LABEL: name: splat_zero_nxv1half
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1half
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x half> zeroinitializer
+}
+
+define <vscale x 2 x half> @splat_zero_nxv2half() {
+ ; RV32-LABEL: name: splat_zero_nxv2half
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2half
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 2 x half> zeroinitializer
+}
+
+define <vscale x 4 x half> @splat_zero_nxv4half() {
+ ; RV32-LABEL: name: splat_zero_nxv4half
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4half
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 4 x half> zeroinitializer
+}
+
+define <vscale x 8 x half> @splat_zero_nxv8half() {
+ ; RV32-LABEL: name: splat_zero_nxv8half
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8half
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 8 x half> zeroinitializer
+}
+
+define <vscale x 16 x half> @splat_zero_nxv16half() {
+ ; RV32-LABEL: name: splat_zero_nxv16half
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv16half
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 16 x half> zeroinitializer
+}
+
+define <vscale x 32 x half> @splat_zero_nxv32half() {
+ ; RV32-LABEL: name: splat_zero_nxv32half
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv32half
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 32 x half> zeroinitializer
+}
+
+define <vscale x 1 x float> @splat_zero_nxv1float() {
+ ; RV32-LABEL: name: splat_zero_nxv1float
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1float
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x float> zeroinitializer
+}
+
+define <vscale x 2 x float> @splat_zero_nxv2float() {
+ ; RV32-LABEL: name: splat_zero_nxv2float
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2float
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 2 x float> zeroinitializer
+}
+
+define <vscale x 4 x float> @splat_zero_nxv4float() {
+ ; RV32-LABEL: name: splat_zero_nxv4float
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4float
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 4 x float> zeroinitializer
+}
+
+define <vscale x 8 x float> @splat_zero_nxv8float() {
+ ; RV32-LABEL: name: splat_zero_nxv8float
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8float
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 8 x float> zeroinitializer
+}
+
+define <vscale x 16 x float> @splat_zero_nxv16float() {
+ ; RV32-LABEL: name: splat_zero_nxv16float
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv16float
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 16 x float> zeroinitializer
+}
+
+define <vscale x 1 x double> @splat_zero_nxv1double() {
+ ; RV32-LABEL: name: splat_zero_nxv1double
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1double
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x double> zeroinitializer
+}
+
+define <vscale x 2 x double> @splat_zero_nxv2double() {
+ ; RV32-LABEL: name: splat_zero_nxv2double
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2double
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 2 x double> zeroinitializer
+}
+
+define <vscale x 4 x double> @splat_zero_nxv4double() {
+ ; RV32-LABEL: name: splat_zero_nxv4double
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4double
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 4 x double> zeroinitializer
+}
+
+define <vscale x 8 x double> @splat_zero_nxv8double() {
+ ; RV32-LABEL: name: splat_zero_nxv8double
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV32-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+ ; RV32-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8double
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 8 x double> zeroinitializer
+}
+
+define <vscale x 1 x ptr> @splat_zero_nxv1ptr() {
+ ; RV32-LABEL: name: splat_zero_nxv1ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv1ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8
+ ret <vscale x 1 x ptr> zeroinitializer
+}
+
+define <vscale x 2 x ptr> @splat_zero_nxv2ptr() {
+ ; RV32-LABEL: name: splat_zero_nxv2ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV32-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv2ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m2
+ ret <vscale x 2 x ptr> zeroinitializer
+}
+
+define <vscale x 4 x ptr> @splat_zero_nxv4ptr() {
+ ; RV32-LABEL: name: splat_zero_nxv4ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV32-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv4ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m4
+ ret <vscale x 4 x ptr> zeroinitializer
+}
+
+define <vscale x 8 x ptr> @splat_zero_nxv8ptr() {
+ ; RV32-LABEL: name: splat_zero_nxv8ptr
+ ; RV32: bb.1 (%ir-block.0):
+ ; RV32-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
+ ; RV32-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV32-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x p0>)
+ ; RV32-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64-LABEL: name: splat_zero_nxv8ptr
+ ; RV64: bb.1 (%ir-block.0):
+ ; RV64-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
+ ; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x p0>) = G_SPLAT_VECTOR [[C]](p0)
+ ; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x p0>)
+ ; RV64-NEXT: PseudoRET implicit $v8m8
+ ret <vscale x 8 x ptr> zeroinitializer
+}