[llvm] [IRTranslator][RISCV] Support scalable vector zeroinitializer. (PR #108666)

via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 13 17:12:21 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-llvm-globalisel

Author: Craig Topper (topperc)

Changes:



---

Patch is 38.08 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/108666.diff


2 Files Affected:

- (modified) llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp (+5-2) 
- (added) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll (+821) 


``````````diff
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index b85087c23845d5..5053344c456dcc 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3528,8 +3528,11 @@ bool IRTranslator::translate(const Constant &C, Register Reg) {
     Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
     EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
   } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
-    if (!isa<FixedVectorType>(CAZ->getType()))
-      return false;
+    if (!isa<FixedVectorType>(CAZ->getType())) {
+      Constant &Elt = *CAZ->getElementValue(0u);
+      EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
+      return true;
+    }
     // Return the scalar if it is a <1 x Ty> vector.
     unsigned NumElts = CAZ->getElementCount().getFixedValue();
     if (NumElts == 1)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll
new file mode 100644
index 00000000000000..6a1c3ca2b0b674
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/splat_vector.ll
@@ -0,0 +1,821 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+define <vscale x 1 x i1> @splat_zero_nxv1i1() {
+  ; RV32-LABEL: name: splat_zero_nxv1i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv1i1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
+  ret <vscale x 1 x i1> zeroinitializer
+}
+
+define <vscale x 2 x i1> @splat_zero_nxv2i1() {
+  ; RV32-LABEL: name: splat_zero_nxv2i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv2i1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
+  ret <vscale x 2 x i1> zeroinitializer
+}
+
+define <vscale x 4 x i1> @splat_zero_nxv4i1() {
+  ; RV32-LABEL: name: splat_zero_nxv4i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv4i1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
+  ret <vscale x 4 x i1> zeroinitializer
+}
+
+define <vscale x 8 x i1> @splat_zero_nxv8i1() {
+  ; RV32-LABEL: name: splat_zero_nxv8i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv8i1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
+  ret <vscale x 8 x i1> zeroinitializer
+}
+
+define <vscale x 16 x i1> @splat_zero_nxv16i1() {
+  ; RV32-LABEL: name: splat_zero_nxv16i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv16i1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
+  ret <vscale x 16 x i1> zeroinitializer
+}
+
+define <vscale x 32 x i1> @splat_zero_nxv32i1() {
+  ; RV32-LABEL: name: splat_zero_nxv32i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv32i1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
+  ret <vscale x 32 x i1> zeroinitializer
+}
+
+define <vscale x 64 x i1> @splat_zero_nxv64i1() {
+  ; RV32-LABEL: name: splat_zero_nxv64i1
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv64i1
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1)
+  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
+  ret <vscale x 64 x i1> zeroinitializer
+}
+
+define <vscale x 1 x i8> @splat_zero_nxv1i8() {
+  ; RV32-LABEL: name: splat_zero_nxv1i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv1i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 1 x i8> zeroinitializer
+}
+
+define <vscale x 2 x i8> @splat_zero_nxv2i8() {
+  ; RV32-LABEL: name: splat_zero_nxv2i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv2i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 2 x i8> zeroinitializer
+}
+
+define <vscale x 4 x i8> @splat_zero_nxv4i8() {
+  ; RV32-LABEL: name: splat_zero_nxv4i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv4i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 4 x i8> zeroinitializer
+}
+
+define <vscale x 8 x i8> @splat_zero_nxv8i8() {
+  ; RV32-LABEL: name: splat_zero_nxv8i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv8i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 8 x i8> zeroinitializer
+}
+
+define <vscale x 16 x i8> @splat_zero_nxv16i8() {
+  ; RV32-LABEL: name: splat_zero_nxv16i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv16i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  ret <vscale x 16 x i8> zeroinitializer
+}
+
+define <vscale x 32 x i8> @splat_zero_nxv32i8() {
+  ; RV32-LABEL: name: splat_zero_nxv32i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv32i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  ret <vscale x 32 x i8> zeroinitializer
+}
+
+define <vscale x 64 x i8> @splat_zero_nxv64i8() {
+  ; RV32-LABEL: name: splat_zero_nxv64i8
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv64i8
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8)
+  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  ret <vscale x 64 x i8> zeroinitializer
+}
+
+define <vscale x 1 x i16> @splat_zero_nxv1i16() {
+  ; RV32-LABEL: name: splat_zero_nxv1i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv1i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 1 x i16> zeroinitializer
+}
+
+define <vscale x 2 x i16> @splat_zero_nxv2i16() {
+  ; RV32-LABEL: name: splat_zero_nxv2i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv2i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 2 x i16> zeroinitializer
+}
+
+define <vscale x 4 x i16> @splat_zero_nxv4i16() {
+  ; RV32-LABEL: name: splat_zero_nxv4i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv4i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 4 x i16> zeroinitializer
+}
+
+define <vscale x 8 x i16> @splat_zero_nxv8i16() {
+  ; RV32-LABEL: name: splat_zero_nxv8i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv8i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  ret <vscale x 8 x i16> zeroinitializer
+}
+
+define <vscale x 16 x i16> @splat_zero_nxv16i16() {
+  ; RV32-LABEL: name: splat_zero_nxv16i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv16i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  ret <vscale x 16 x i16> zeroinitializer
+}
+
+define <vscale x 32 x i16> @splat_zero_nxv32i16() {
+  ; RV32-LABEL: name: splat_zero_nxv32i16
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv32i16
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
+  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+  ret <vscale x 32 x i16> zeroinitializer
+}
+
+define <vscale x 1 x i32> @splat_zero_nxv1i32() {
+  ; RV32-LABEL: name: splat_zero_nxv1i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv1i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 1 x i32> zeroinitializer
+}
+
+define <vscale x 2 x i32> @splat_zero_nxv2i32() {
+  ; RV32-LABEL: name: splat_zero_nxv2i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv2i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+  ret <vscale x 2 x i32> zeroinitializer
+}
+
+define <vscale x 4 x i32> @splat_zero_nxv4i32() {
+  ; RV32-LABEL: name: splat_zero_nxv4i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv4i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+  ret <vscale x 4 x i32> zeroinitializer
+}
+
+define <vscale x 8 x i32> @splat_zero_nxv8i32() {
+  ; RV32-LABEL: name: splat_zero_nxv8i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv8i32
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+  ret <vscale x 8 x i32> zeroinitializer
+}
+
+define <vscale x 16 x i32> @splat_zero_nxv16i32() {
+  ; RV32-LABEL: name: splat_zero_nxv16i32
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
+  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: splat_zero_nxv16i32
+  ; RV64: bb.1 (%ir-block.0):
+ ...
[truncated]

``````````
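
For quick reference, a stripped-down sketch of the new behavior, distilled from the test above; the function name and the shortened RUN line here are illustrative, not part of the patch:

```llvm
; Minimal sketch (assumed, not from the patch): with this change the
; IRTranslator lowers a scalable-vector zeroinitializer to a splat of the
; zero element instead of falling back to SelectionDAG.
;
; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator < %s

define <vscale x 4 x i32> @zero_nxv4i32() {
  ; Expected gMIR (per the checks in splat_zero_nxv4i32 above):
  ;   %0:_(s32) = G_CONSTANT i32 0
  ;   %1:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %0(s32)
  ret <vscale x 4 x i32> zeroinitializer
}
```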



https://github.com/llvm/llvm-project/pull/108666

