[llvm] [RISCV][GISEL] regbankselect for G_SPLAT_VECTOR (PR #110744)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 1 15:18:17 PDT 2024
================
@@ -0,0 +1,587 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck %s
+
+---
+name: splat_zero_nxv1i8
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv1i8
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv2i8
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv2i8
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv4i8
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv4i8
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv8i8
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv8i8
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv16i8
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv16i8
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR %2(s64)
+ $v8m2 = COPY %0(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splat_zero_nxv32i8
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv32i8
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR %2(s64)
+ $v8m4 = COPY %0(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splat_zero_nxv64i8
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv64i8
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR %2(s64)
+ $v8m8 = COPY %0(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: splat_zero_nxv1i16
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv1i16
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv2i16
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv2i16
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv4i16
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv4i16
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv8i16
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv8i16
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR %2(s64)
+ $v8m2 = COPY %0(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splat_zero_nxv16i16
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv16i16
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR %2(s64)
+ $v8m4 = COPY %0(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splat_zero_nxv32i16
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv32i16
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %3:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %3(s32)
+ %0:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR %2(s64)
+ $v8m8 = COPY %0(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: splat_zero_nxv1i32
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv1i32
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %1(s32)
+ %0:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv2i32
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv2i32
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %1(s32)
+ %0:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR %2(s64)
+ $v8 = COPY %0(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv4i32
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv4i32
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %1(s32)
+ %0:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR %2(s64)
+ $v8m2 = COPY %0(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splat_zero_nxv8i32
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv8i32
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %1(s32)
+ %0:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR %2(s64)
+ $v8m4 = COPY %0(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splat_zero_nxv16i32
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv16i32
+ ; CHECK: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gprb(s64) = G_ANYEXT [[C]](s32)
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[ANYEXT]](s64)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(s32) = G_CONSTANT i32 0
+ %2:_(s64) = G_ANYEXT %1(s32)
+ %0:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR %2(s64)
+ $v8m8 = COPY %0(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: splat_zero_nxv1i64
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv1i64
+ ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(s64) = G_CONSTANT i64 0
+ %0:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8 = COPY %0(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: splat_zero_nxv2i64
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv2i64
+ ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(s64) = G_CONSTANT i64 0
+ %0:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m2 = COPY %0(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: splat_zero_nxv4i64
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv4i64
+ ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %1:_(s64) = G_CONSTANT i64 0
+ %0:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m4 = COPY %0(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: splat_zero_nxv8i64
+legalized: true
+regBankSelected: false
+body: |
+ bb.1:
+ ; CHECK-LABEL: name: splat_zero_nxv8i64
+ ; CHECK: [[C:%[0-9]+]]:gprb(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
+ ; CHECK-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %1:_(s64) = G_CONSTANT i64 0
+ %0:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR %1(s64)
+ $v8m8 = COPY %0(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: splat_zero_nxv1float
----------------
topperc wrote:
float -> f32
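For illustration, a minimal sketch of the rename being requested here (an assumption based on the comment, keeping the naming scheme of the integer tests above, where the element type is spelled out as in nxv1i8/nxv1i32):

name: splat_zero_nxv1f32

This keeps the FP test names consistent with the i8/i16/i32/i64 cases in the same file.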
https://github.com/llvm/llvm-project/pull/110744