[llvm] [RISCV][GISEL] Add support for scalable vector types in lowerReturnVal (PR #71587)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 8 10:38:39 PST 2023


================
@@ -0,0 +1,809 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+; ==========================================================================
+; ============================= Scalable Types =============================
+; ==========================================================================
+
+define <vscale x 1 x i8> @test_ret_nxv1i8() {
+  ; RV32-LABEL: name: test_ret_nxv1i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i8> undef
+}
+
+define <vscale x 2 x i8> @test_ret_nxv2i8() {
+  ; RV32-LABEL: name: test_ret_nxv2i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i8> undef
+}
+
+define <vscale x 4 x i8> @test_ret_nxv4i8() {
+  ; RV32-LABEL: name: test_ret_nxv4i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i8> undef
+}
+
+define <vscale x 8 x i8> @test_ret_nxv8i8() {
+  ; RV32-LABEL: name: test_ret_nxv8i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 8 x i8> undef
+}
+
+define <vscale x 16 x i8> @test_ret_nxv16i8() {
+  ; RV32-LABEL: name: test_ret_nxv16i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 16 x i8> undef
+}
+
+define <vscale x 32 x i8> @test_ret_nxv32i8() {
+  ; RV32-LABEL: name: test_ret_nxv32i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 32 x i8> undef
+}
+
+define <vscale x 64 x i8> @test_ret_nxv64i8() {
+  ; RV32-LABEL: name: test_ret_nxv64i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv64i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s8>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 64 x i8> undef
+}
+
+define <vscale x 1 x i16> @test_ret_nxv1i16() {
+  ; RV32-LABEL: name: test_ret_nxv1i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i16> undef
+}
+
+define <vscale x 2 x i16> @test_ret_nxv2i16() {
+  ; RV32-LABEL: name: test_ret_nxv2i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i16> undef
+}
+
+define <vscale x 4 x i16> @test_ret_nxv4i16() {
+  ; RV32-LABEL: name: test_ret_nxv4i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i16> undef
+}
+
+define <vscale x 8 x i16> @test_ret_nxv8i16() {
+  ; RV32-LABEL: name: test_ret_nxv8i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 8 x i16> undef
+}
+
+define <vscale x 16 x i16> @test_ret_nxv16i16() {
+  ; RV32-LABEL: name: test_ret_nxv16i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 16 x i16> undef
+}
+
+define <vscale x 32 x i16> @test_ret_nxv32i16() {
+  ; RV32-LABEL: name: test_ret_nxv32i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s16>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 32 x i16> undef
+}
+
+define <vscale x 1 x i32> @test_ret_nxv1i32() {
+  ; RV32-LABEL: name: test_ret_nxv1i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i32> undef
+}
+
+define <vscale x 2 x i32> @test_ret_nxv2i32() {
+  ; RV32-LABEL: name: test_ret_nxv2i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i32> undef
+}
+
+define <vscale x 4 x i32> @test_ret_nxv4i32() {
+  ; RV32-LABEL: name: test_ret_nxv4i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 4 x i32> undef
+}
+
+define <vscale x 8 x i32> @test_ret_nxv8i32() {
+  ; RV32-LABEL: name: test_ret_nxv8i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 8 x i32> undef
+}
+
+define <vscale x 16 x i32> @test_ret_nxv16i32() {
+  ; RV32-LABEL: name: test_ret_nxv16i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 16 x i32> undef
+}
+
+define <vscale x 1 x i64> @test_ret_nxv1i64() {
+  ; RV32-LABEL: name: test_ret_nxv1i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i64> undef
+}
+
+define <vscale x 2 x i64> @test_ret_nxv2i64() {
+  ; RV32-LABEL: name: test_ret_nxv2i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 2 x i64> undef
+}
+
+define <vscale x 4 x i64> @test_ret_nxv4i64() {
+  ; RV32-LABEL: name: test_ret_nxv4i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 4 x i64> undef
+}
+
+define <vscale x 8 x i64> @test_ret_nxv8i64() {
+  ; RV32-LABEL: name: test_ret_nxv8i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 8 x i64> undef
+}
+
+define <vscale x 64 x i1> @test_ret_nxv64i1() {
+  ; RV32-LABEL: name: test_ret_nxv64i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv64i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 64 x i1> undef
+}
+
+define <vscale x 32 x i1> @test_ret_nxv32i1() {
+  ; RV32-LABEL: name: test_ret_nxv32i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv32i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 32 x i1> undef
+}
+
+define <vscale x 16 x i1> @test_ret_nxv16i1() {
+  ; RV32-LABEL: name: test_ret_nxv16i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 16 x i1> undef
+}
+
+define <vscale x 8 x i1> @test_ret_nxv8i1() {
+  ; RV32-LABEL: name: test_ret_nxv8i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 8 x i1> undef
+}
+
+define <vscale x 4 x i1> @test_ret_nxv4i1() {
+  ; RV32-LABEL: name: test_ret_nxv4i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 4 x i1> undef
+}
+
+define <vscale x 2 x i1> @test_ret_nxv2i1() {
+  ; RV32-LABEL: name: test_ret_nxv2i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x i1> undef
+}
+
+define <vscale x 1 x i1> @test_ret_nxv1i1() {
+  ; RV32-LABEL: name: test_ret_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x i1> undef
+}
+
+define <vscale x 1 x float> @test_ret_nxv1f32() {
+  ; RV32-LABEL: name: test_ret_nxv1f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x float> undef
+}
+
+define <vscale x 2 x float> @test_ret_nxv2f32() {
+  ; RV32-LABEL: name: test_ret_nxv2f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 2 x float> undef
+}
+
+define <vscale x 4 x float> @test_ret_nxv4f32() {
+  ; RV32-LABEL: name: test_ret_nxv4f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv4f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 4 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 4 x float> undef
+}
+
+define <vscale x 8 x float> @test_ret_nxv8f32() {
+  ; RV32-LABEL: name: test_ret_nxv8f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m4
+  ;
+  ; RV64-LABEL: name: test_ret_nxv8f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m4 = COPY [[DEF]](<vscale x 8 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m4
+entry:
+  ret <vscale x 8 x float> undef
+}
+
+define <vscale x 16 x float> @test_ret_nxv16f32() {
+  ; RV32-LABEL: name: test_ret_nxv16f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv16f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s32>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m8 = COPY [[DEF]](<vscale x 16 x s32>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m8
+entry:
+  ret <vscale x 16 x float> undef
+}
+
+define <vscale x 1 x double> @test_ret_nxv1f64() {
+  ; RV32-LABEL: name: test_ret_nxv1f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8
+  ;
+  ; RV64-LABEL: name: test_ret_nxv1f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8
+entry:
+  ret <vscale x 1 x double> undef
+}
+
+define <vscale x 2 x double> @test_ret_nxv2f64() {
+  ; RV32-LABEL: name: test_ret_nxv2f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV32-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV32-NEXT:   PseudoRET implicit $v8m2
+  ;
+  ; RV64-LABEL: name: test_ret_nxv2f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s64>) = G_IMPLICIT_DEF
+  ; RV64-NEXT:   $v8m2 = COPY [[DEF]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET implicit $v8m2
+entry:
+  ret <vscale x 2 x double> undef
----------------
michaelmaitland wrote:

At this point we have not done any work to support scalable vector loads or stores in the IRTranslator, which is why I went with undef.
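
For illustration only, a non-undef variant of one of these tests might look like the sketch below (the function name and pointer argument are hypothetical); translating it would need G_LOAD support for scalable vectors, which does not exist yet:

define <vscale x 1 x i8> @test_ret_nxv1i8_load(ptr %p) {
entry:
  ; Requires scalable-vector G_LOAD support in the IRTranslator.
  %v = load <vscale x 1 x i8>, ptr %p
  ret <vscale x 1 x i8> %v
}

Returning undef keeps these tests focused on lowerReturnVal without depending on that work.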

https://github.com/llvm/llvm-project/pull/71587

