[llvm] [RISCV][GISEL] Add support for lowerFormalArguments that contain scalable vector types (PR #70882)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 2 16:37:41 PDT 2023


================
@@ -0,0 +1,1233 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefix=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefix=RV64 %s
+
+; ==========================================================================
+; ============================= Scalable Types =============================
+; ==========================================================================
+
+define void @test_args_nxv1i8(<vscale x 1 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i8(<vscale x 2 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i8(<vscale x 4 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i8(<vscale x 8 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i8(<vscale x 16 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i8(<vscale x 32 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv64i8(<vscale x 64 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv64i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv64i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i16(<vscale x 1 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i16(<vscale x 2 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i16(<vscale x 4 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i16(<vscale x 8 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i16(<vscale x 16 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i16(<vscale x 32 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i32(<vscale x 1 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i32(<vscale x 2 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i32(<vscale x 4 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i32(<vscale x 8 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i32(<vscale x 16 x i32> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i64(<vscale x 1 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i64(<vscale x 2 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i64(<vscale x 4 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i64(<vscale x 8 x i64> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv64i1(<vscale x 64 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv64i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv64i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i1(<vscale x 32 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i1(<vscale x 16 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i1(<vscale x 8 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i1(<vscale x 4 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i1(<vscale x 2 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i1(<vscale x 1 x i1> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f32(<vscale x 1 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f32(<vscale x 2 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f32(<vscale x 4 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f32(<vscale x 8 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16f32(<vscale x 16 x float> %a) {
+  ; RV32-LABEL: name: test_args_nxv16f32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16f32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f64(<vscale x 1 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f64(<vscale x 2 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f64(<vscale x 4 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f64(<vscale x 8 x double> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f64
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f64
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1f16(<vscale x 1 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv1f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_BUILD_VECTOR [[COPY]](s32)
+  ; RV32-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 1 x s32>)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_BUILD_VECTOR [[COPY]](s64)
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 1 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 1 x s64>)
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2f16(<vscale x 2 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv2f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $x10, $x11
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
+  ; RV32-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 2 x s32>)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $x10, $x11
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64)
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 2 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 2 x s64>)
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4f16(<vscale x 4 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv4f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
+  ; RV32-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 4 x s32>)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64)
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 4 x s64>)
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8f16(<vscale x 8 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv8f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
+  ; RV32-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 8 x s32>)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64), [[COPY4]](s64), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64)
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 8 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 8 x s64>)
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16f16(<vscale x 16 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv16f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY9:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY10:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY11:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
+  ; RV32-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 16 x s32>)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY9:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY10:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY15:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 16 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64), [[COPY4]](s64), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64), [[COPY9]](s64), [[COPY10]](s64), [[COPY11]](s64), [[COPY12]](s64), [[COPY13]](s64), [[COPY14]](s64), [[COPY15]](s64)
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 16 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 16 x s64>)
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32f16(<vscale x 32 x half> %a) {
+  ; RV32-LABEL: name: test_args_nxv32f16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $x10
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY9:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY10:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY11:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY20:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY21:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY22:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY23:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY24:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY25:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY26:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY27:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY28:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY29:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY30:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[COPY31:%[0-9]+]]:_(s32) = COPY $x10
+  ; RV32-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[COPY31]](s32)
+  ; RV32-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 32 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 32 x s32>)
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32f16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $x10
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY9:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY10:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY15:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY16:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY17:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY18:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY19:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY20:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY21:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY22:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY23:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY24:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY25:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY26:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY27:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY28:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY29:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY30:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[COPY31:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 32 x s64>) = G_BUILD_VECTOR [[COPY]](s64), [[COPY1]](s64), [[COPY2]](s64), [[COPY3]](s64), [[COPY4]](s64), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64), [[COPY9]](s64), [[COPY10]](s64), [[COPY11]](s64), [[COPY12]](s64), [[COPY13]](s64), [[COPY14]](s64), [[COPY15]](s64), [[COPY16]](s64), [[COPY17]](s64), [[COPY18]](s64), [[COPY19]](s64), [[COPY20]](s64), [[COPY21]](s64), [[COPY22]](s64), [[COPY23]](s64), [[COPY24]](s64), [[COPY25]](s64), [[COPY26]](s64), [[COPY27]](s64), [[COPY28]](s64), [[COPY29]](s64), [[COPY30]](s64), [[COPY31]](s64)
+  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(<vscale x 32 x s16>) = G_TRUNC [[BUILD_VECTOR]](<vscale x 32 x s64>)
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1b16(<vscale x 1 x bfloat> %a) {
+  ; RV32-LABEL: name: test_args_nxv1b16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $f10_f
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+  ; RV32-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_BUILD_VECTOR [[COPY]](s32)
----------------
topperc wrote:

The caller would also have had a `<vscale x 1 x s32>`, but it only passed one value in f10_f to this function, so the caller dropped the rest of the vector. That's bad.
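
As a minimal sketch (not from this patch, with a hypothetical `@caller`): the argument holds `vscale x 1` elements at run time, so lowering it through a single scalar register can only ever transfer a fixed number of elements:

```llvm
declare void @test_args_nxv1b16(<vscale x 1 x bfloat>)

define void @caller(<vscale x 1 x bfloat> %v) {
  ; Scalarizing %v for this call materializes a fixed number of scalars,
  ; but %v contains vscale x 1 elements; the remainder is silently lost.
  call void @test_args_nxv1b16(<vscale x 1 x bfloat> %v)
  ret void
}
```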

In SelectionDAG, BUILD_VECTOR does not support scalable vectors.

In SelectionDAG, if you pass a bfloat16 scalable vector without support for bf16 vectors, the compiler crashes or throws an error. We could potentially bitcast it to a different scalable vector with the same total size, but we definitely cannot scalarize it.
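
A hedged illustration of that bitcast alternative (the `@pass_as_same_size_int` helper is hypothetical, and it assumes a same-sized integer element type is legal for the target): the whole vector is reinterpreted rather than scalarized:

```llvm
define <vscale x 1 x i16> @pass_as_same_size_int(<vscale x 1 x bfloat> %v) {
  ; bf16 and i16 elements are both 16 bits wide, so the two scalable
  ; types have the same total size and the bitcast preserves all bits.
  %b = bitcast <vscale x 1 x bfloat> %v to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}
```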

https://github.com/llvm/llvm-project/pull/70882

