[llvm] [RISCV][GlobalISel] Vector Extension vadd Legalizer (PR #71400)

via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 6 08:23:10 PST 2023


llvmbot wrote:



@llvm/pr-subscribers-llvm-globalisel

Author: Jiahan Xie (jiahanxie353)

<details>
<summary>Changes</summary>

Hi,

I'm excited to share this draft PR introducing support for the RISC-V Vector Extension `vadd` instruction in the GlobalISel framework. Since this is my first time contributing to the LLVM project, I'd like to use it as an opportunity to spark early discussion and solicit constructive feedback.
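
For readers less familiar with GlobalISel, the legalization part of this work boils down to teaching `RISCVLegalizerInfo` that `G_ADD` is legal for RVV scalable vector types when the V extension is available. The fragment below is only an illustrative sketch, not code from this patch; the type list, variable names, and exact rules are assumptions.

```cpp
// Illustrative fragment for the RISCVLegalizerInfo constructor (assumed shape,
// not taken from this patch). s32 and sXLen are the scalar LLTs already
// defined in that constructor; ST is the RISCVSubtarget.
const LLT nxv1s8 = LLT::scalable_vector(1, 8);
const LLT nxv2s8 = LLT::scalable_vector(2, 8);
const LLT nxv4s8 = LLT::scalable_vector(4, 8);

auto &AddRules = getActionDefinitionsBuilder(G_ADD).legalFor({s32, sXLen});
if (ST.hasVInstructions())
  // Mark G_ADD legal for a handful of scalable vector types so that a G_ADD
  // on <vscale x N x s8> survives the legalizer unchanged.
  AddRules.legalFor({nxv1s8, nxv2s8, nxv4s8});
// Scalars keep the usual widening/clamping treatment; these rules have no
// effect on vector types.
AddRules.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);
```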

I'm working on legalization and have [this very minimal](https://github.com/jiahanxie353/llvm-project/blob/a20ba10083f3345a07722f2494f80f128e3f57e4/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-vadd.mir#L9) test case. First, I want to verify that the test cases themselves are correct; my plan is to run them through `SelectionDAG` and check that it completes successfully.
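
For reference, a minimal legalizer MIR test for a scalable `vadd` typically looks something like the sketch below. This is illustrative only; the function name, register choices, and element type are assumptions and the linked file may differ.

```
# Hypothetical sketch of a minimal legalizer MIR test; not copied from the PR.
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---
name:            test_vadd
tracksRegLiveness: true
body:             |
  bb.0.entry:
    liveins: $v8, $v9
    %0:_(<vscale x 1 x s8>) = COPY $v8
    %1:_(<vscale x 1 x s8>) = COPY $v9
    ; The scalable-vector G_ADD is what the new legalizer rules must accept.
    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s8>)
    PseudoRET implicit $v8
...
```
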
However, when I run `llc -mtriple=riscv32 -mattr=+v -debug-only=isel -view-legalize-dags  legalize-vadd.mir`, it reports:
```
0. Program arguments: ../../../../../../build/bin/llc -mtriple=riscv32 -mattr=+v -debug-only=isel -view-legalize-dags ./rv32/legalize-vadd.mir
1. Running pass 'Function Pass Manager' on module './rv32/legalize-vadd.mir'.
2. Running pass 'Live Variable Analysis' on function '@test_vadd'
gio: file:///tmp/dag.test_vadd-e7c028.dot: No application is registered as handling this file
```
Is there any obvious mistake I made?

Any feedback and guidance are greatly appreciated! Thank you in advance for your time and assistance.

(I don't know how to request reviewers after creating this draft PR, so I'll just @ you here: @michaelmaitland :)


---

Patch is 43.41 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/71400.diff


14 Files Affected:

- (modified) llvm/include/llvm/CodeGen/TargetRegisterInfo.h (+3-3) 
- (modified) llvm/lib/CodeGen/GlobalISel/CallLowering.cpp (+2-1) 
- (modified) llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp (+3-3) 
- (modified) llvm/lib/CodeGen/LowLevelType.cpp (+1-1) 
- (modified) llvm/lib/CodeGen/MachineVerifier.cpp (+17-6) 
- (modified) llvm/lib/CodeGen/TargetRegisterInfo.cpp (+9-10) 
- (modified) llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp (+36-2) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll (+2-2) 
- (added) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll (+16) 
- (added) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll (+16) 
- (added) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll (+909) 
- (added) llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rv32/legalize-vadd.mir (+10) 
- (added) llvm/test/CodeGen/RISCV/rvv/vadd-vv.ll (+14) 
- (added) llvm/test/MachineVerifier/copy-scalable.mir (+23) 


``````````diff
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index 337fab735a09522..4fb6ba7c26930af 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -283,8 +283,8 @@ class TargetRegisterInfo : public MCRegisterInfo {
   // DenseMapInfo<unsigned> uses -1u and -2u.
 
   /// Return the size in bits of a register from class RC.
-  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
-    return getRegClassInfo(RC).RegSize;
+  TypeSize getRegSizeInBits(const TargetRegisterClass &RC) const {
+    return TypeSize::Fixed(getRegClassInfo(RC).RegSize);
   }
 
   /// Return the size in bytes of the stack slot allocated to hold a spilled
@@ -858,7 +858,7 @@ class TargetRegisterInfo : public MCRegisterInfo {
     const TargetRegisterClass *RC) const = 0;
 
   /// Returns size in bits of a phys/virtual/generic register.
-  unsigned getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;
+  TypeSize getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;
 
   /// Get the weight in units of pressure for this register unit.
   virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 975787a8f5e734f..2527b1431289677 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -358,7 +358,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
   if (PartLLT.isVector() == LLTy.isVector() &&
       PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
       (!PartLLT.isVector() ||
-       PartLLT.getNumElements() == LLTy.getNumElements()) &&
+       PartLLT.getElementCount() == LLTy.getElementCount()) &&
       OrigRegs.size() == 1 && Regs.size() == 1) {
     Register SrcReg = Regs[0];
 
@@ -406,6 +406,7 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     // If PartLLT is a mismatched vector in both number of elements and element
     // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
     // have the same elt type, i.e. v4s32.
+    // TODO: Extend this coersion to element multiples other than just 2.
     if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
         PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
         Regs.size() == 1) {
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 5b4e2b725e1dd76..80e9c08e850b683 100644
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -1065,16 +1065,16 @@ void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
 #ifndef NDEBUG
   if (DstTy.isVector()) {
     assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
-    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
+    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
            "different number of elements in a trunc/ext");
   } else
     assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
 
   if (IsExtend)
-    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
+    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
            "invalid narrowing extend");
   else
-    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
+    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
            "invalid widening trunc");
 #endif
 }
diff --git a/llvm/lib/CodeGen/LowLevelType.cpp b/llvm/lib/CodeGen/LowLevelType.cpp
index 24c30b756737b20..cd85bf606989f9e 100644
--- a/llvm/lib/CodeGen/LowLevelType.cpp
+++ b/llvm/lib/CodeGen/LowLevelType.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
 
 LLT::LLT(MVT VT) {
   if (VT.isVector()) {
-    bool asVector = VT.getVectorMinNumElements() > 1;
+    bool asVector = VT.getVectorMinNumElements() > 1 || VT.isScalableVector();
     init(/*IsPointer=*/false, asVector, /*IsScalar=*/!asVector,
          VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
          /*AddressSpace=*/0);
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index dadaf60fa09da04..9ccda9abc019ddf 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -965,7 +965,7 @@ bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
     return false;
   }
 
-  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
+  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
     report("operand types must preserve number of vector elements", MI);
     return false;
   }
@@ -1937,8 +1937,8 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
 
     // If we have only one valid type, this is likely a copy between a virtual
     // and physical register.
-    unsigned SrcSize = 0;
-    unsigned DstSize = 0;
+    TypeSize SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
+    TypeSize DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
     if (SrcReg.isPhysical() && DstTy.isValid()) {
       const TargetRegisterClass *SrcRC =
           TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
@@ -1946,7 +1946,7 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
         SrcSize = TRI->getRegSizeInBits(*SrcRC);
     }
 
-    if (SrcSize == 0)
+    if (SrcSize.isZero())
       SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
 
     if (DstReg.isPhysical() && SrcTy.isValid()) {
@@ -1956,10 +1956,21 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
         DstSize = TRI->getRegSizeInBits(*DstRC);
     }
 
-    if (DstSize == 0)
+    if (DstSize.isZero())
       DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
 
-    if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
+    // If the Dst is scalable and the Src is fixed, then the Dst can only hold
+    // the Src if the minimum size Dst can hold is at least as big as Src.
+    if (DstSize.isScalable() && !SrcSize.isScalable() &&
+        DstSize.getKnownMinValue() <= SrcSize.getFixedValue())
+      break;
+    // If the Src is scalable and the Dst is fixed, then Dest can only hold
+    // the Src is known to fit in Dest
+    if (SrcSize.isScalable() && !DstSize.isScalable() &&
+        TypeSize::isKnownLE(DstSize, SrcSize))
+      break;
+
+    if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
       if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
         report("Copy Instruction is illegal with mismatching sizes", MI);
         errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
diff --git a/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
index 1bb35f40facfd0f..c50b1cf9422717a 100644
--- a/llvm/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/llvm/lib/CodeGen/TargetRegisterInfo.cpp
@@ -499,7 +499,7 @@ bool TargetRegisterInfo::regmaskSubsetEqual(const uint32_t *mask0,
   return true;
 }
 
-unsigned
+TypeSize
 TargetRegisterInfo::getRegSizeInBits(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
   const TargetRegisterClass *RC{};
@@ -508,16 +508,15 @@ TargetRegisterInfo::getRegSizeInBits(Register Reg,
     // Instead, we need to access a register class that contains Reg and
     // get the size of that register class.
     RC = getMinimalPhysRegClass(Reg);
-  } else {
-    LLT Ty = MRI.getType(Reg);
-    unsigned RegSize = Ty.isValid() ? Ty.getSizeInBits() : 0;
-    // If Reg is not a generic register, query the register class to
-    // get its size.
-    if (RegSize)
-      return RegSize;
-    // Since Reg is not a generic register, it must have a register class.
-    RC = MRI.getRegClass(Reg);
+    assert(RC && "Unable to deduce the register class");
+    return getRegSizeInBits(*RC);
   }
+  LLT Ty = MRI.getType(Reg);
+  if (Ty.isValid())
+    return Ty.getSizeInBits();
+
+  // Since Reg is not a generic register, it may have a register class.
+  RC = MRI.getRegClass(Reg);
   assert(RC && "Unable to deduce the register class");
   return getRegSizeInBits(*RC);
 }
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index a1dbc21ca364666..cbcdb54a0fcd2d1 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -14,6 +14,8 @@
 
 #include "RISCVCallLowering.h"
 #include "RISCVISelLowering.h"
+#include "RISCVMachineFunctionInfo.h"
+#include "RISCVMachineFunctionInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -185,6 +187,9 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
     const DataLayout &DL = MF.getDataLayout();
     const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
 
+    if (LocVT.isScalableVector())
+      MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
+
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
                       *Subtarget.getTargetLowering(),
@@ -301,8 +306,31 @@ struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+/// Return true if scalable vector with ScalarTy is legal for lowering.
+static bool isLegalElementTypeForRVV(Type *EltTy,
+                                     const RISCVSubtarget &Subtarget) {
+  if (EltTy->isPointerTy())
+    return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
+  if (EltTy->isIntegerTy(1) || EltTy->isIntegerTy(8) ||
+      EltTy->isIntegerTy(16) || EltTy->isIntegerTy(32))
+    return true;
+  if (EltTy->isIntegerTy(64))
+    return Subtarget.hasVInstructionsI64();
+  if (EltTy->isHalfTy())
+    return Subtarget.hasVInstructionsF16();
+  if (EltTy->isBFloatTy())
+    return Subtarget.hasVInstructionsBF16();
+  if (EltTy->isFloatTy())
+    return Subtarget.hasVInstructionsF32();
+  if (EltTy->isDoubleTy())
+    return Subtarget.hasVInstructionsF64();
+  return false;
+}
+
 // TODO: Support all argument types.
-static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget) {
+// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
+static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
+                                    bool IsLowerArgs = false) {
   // TODO: Integers larger than 2*XLen are passed indirectly which is not
   // supported yet.
   if (T->isIntegerTy())
@@ -311,6 +339,11 @@ static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget) {
     return true;
   if (T->isPointerTy())
     return true;
+  // TODO: Support fixed vector types.
+  if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
+      T->isScalableTy() &&
+      isLegalElementTypeForRVV(T->getScalarType(), Subtarget))
+    return true;
   return false;
 }
 
@@ -398,7 +431,8 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   const RISCVSubtarget &Subtarget =
       MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
   for (auto &Arg : F.args()) {
-    if (!isSupportedArgumentType(Arg.getType(), Subtarget))
+    if (!isSupportedArgumentType(Arg.getType(), Subtarget,
+                                 /*IsLowerArgs=*/true))
       return false;
   }
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
index 5dd62de8a6bc415..a3a913d8ce02d83 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll
@@ -22,7 +22,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_inst
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: call:
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_inst
 define <vscale x 1 x i8> @scalable_inst(i64 %0) nounwind {
 entry:
@@ -35,7 +35,7 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction{{.*}}scalable_alloca
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: alloca:
 ; FALLBACK-WITH-REPORT-OUT-LABEL: scalable_alloca
 define void @scalable_alloca() #1 {
   %local0 = alloca <vscale x 16 x i8>
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll
new file mode 100644
index 000000000000000..f39e7793e5d4f31
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-bf16-err.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for bf16 vectors. If the compiler did not throw an error,
+; then it will try to scalarize the argument to an s32, which may drop elements.
+define void @test_args_nxv1bf16(<vscale x 1 x bfloat> %a) {
+entry:
+  ret void
+}
+
+; CHECK: LLVM ERROR: unable to lower arguments: ptr (in function: test_args_nxv1bf16)
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll
new file mode 100644
index 000000000000000..042b455bfb54754
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args-f16-err.ll
@@ -0,0 +1,16 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+; RUN: not --crash llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s 2>&1 | FileCheck %s
+
+; The purpose of this test is to show that the compiler throws an error when
+; there is no support for f16 vectors. If the compiler did not throw an error,
+; then it will try to scalarize the argument to an s32, which may drop elements.
+define void @test_args_nxv1f16(<vscale x 1 x half> %a) {
+entry:
+  ret void
+}
+
+; CHECK: LLVM ERROR: unable to lower arguments: ptr (in function: test_args_nxv1f16)
+
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
new file mode 100644
index 000000000000000..4df0a8f48cc8d0b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
@@ -0,0 +1,909 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
+
+; ==========================================================================
+; ============================= Scalable Types =============================
+; ==========================================================================
+
+define void @test_args_nxv1i8(<vscale x 1 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i8(<vscale x 2 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv4i8(<vscale x 4 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv4i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv4i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv8i8(<vscale x 8 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv8i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv8i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv16i8(<vscale x 16 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv16i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m2
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv16i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m2
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv32i8(<vscale x 32 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv32i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m4
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv32i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m4
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv64i8(<vscale x 64 x i8> %a) {
+  ; RV32-LABEL: name: test_args_nxv64i8
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8m8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv64i8
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8m8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i16(<vscale x 1 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv1i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv2i16(<vscale x 2 x i16> %a) {
+  ; RV32-LABEL: name: test_args_nxv2i16
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv2i16
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/71400

