[llvm] [RISCV][GISel] First mask argument placed in v0 according to RISCV Vector CC (PR #79343)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 24 10:38:52 PST 2024


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/79343
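
For context before the diff: the change teaches both GISel value assigners
(RISCVOutgoingValueAssigner and RISCVIncomingValueAssigner) to record the
index of the first i1-element vector argument and forward it to the assign
function, instead of always passing std::nullopt, so the calling-convention
code can place that argument in v0 as the RISC-V vector calling convention
requires. A minimal sketch of the detection logic, lifted out of the diff
below (member and parameter names match the patch; the surrounding class is
elided):

  std::optional<unsigned> FirstMaskArgument;
  if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
      ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
    // Remember which argument is the mask so the CC code can give it v0.
    FirstMaskArgument = std::make_optional(ValNo);
    // Only the first mask argument is special; later i1 vectors are
    // passed like any other vector argument (see the new tests).
    AssignedFirstMaskArg = true;
  }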

From e254eec2de19f8fa42d5d5791c5748924bb0d298 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 24 Jan 2024 10:30:53 -0800
Subject: [PATCH 1/2] [RISCV][GISel] First mask argument placed in v0 according
 to RISCV Vector CC.

---
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  |  25 +++-
 .../RISCV/GlobalISel/irtranslator/vec-args.ll | 116 +++++++++++++-----
 .../RISCV/GlobalISel/irtranslator/vec-ret.ll  |  56 ++++-----
 3 files changed, 138 insertions(+), 59 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 26eac17ed24c9f..89ce5b393e692a 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -34,6 +34,9 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
   // Whether this is assigning args for a return.
   bool IsRet;
 
+  // True if assignArg has been called for a mask argument, false otherwise.
+  bool AssignedFirstMaskArg = false;
+
 public:
   RISCVOutgoingValueAssigner(
       RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
@@ -48,10 +51,17 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
     const DataLayout &DL = MF.getDataLayout();
     const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
 
+    std::optional<unsigned> FirstMaskArgument;
+    if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+        ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+      FirstMaskArgument = std::make_optional(ValNo);
+      AssignedFirstMaskArg = true;
+    }
+
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
                       *Subtarget.getTargetLowering(),
-                      /*FirstMaskArgument=*/std::nullopt))
+                      FirstMaskArgument))
       return true;
 
     StackSize = State.getStackSize();
@@ -172,6 +182,9 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
   // Whether this is assigning args from a return.
   bool IsRet;
 
+  // True if assignArg has been called for a mask argument, false otherwise.
+  bool AssignedFirstMaskArg = false;
+
 public:
   RISCVIncomingValueAssigner(
       RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
@@ -189,10 +202,16 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
     if (LocVT.isScalableVector())
       MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
 
+    std::optional<unsigned> FirstMaskArgument;
+    if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+        ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+      FirstMaskArgument = std::make_optional(ValNo);
+      AssignedFirstMaskArg = true;
+    }
+
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
-                      *Subtarget.getTargetLowering(),
-                      /*FirstMaskArgument=*/std::nullopt))
+                      *Subtarget.getTargetLowering(), FirstMaskArgument))
       return true;
 
     StackSize = State.getStackSize();
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
index 4df0a8f48cc8d0..3c4cfaef4d5841 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
@@ -407,16 +407,16 @@ entry:
 define void @test_args_nxv64i1(<vscale x 64 x i1> %a) {
   ; RV32-LABEL: name: test_args_nxv64i1
   ; RV32: bb.1.entry:
-  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT:   liveins: $v0
   ; RV32-NEXT: {{  $}}
-  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: test_args_nxv64i1
   ; RV64: bb.1.entry:
-  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT:   liveins: $v0
   ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
   ; RV64-NEXT:   PseudoRET
 entry:
   ret void
@@ -425,16 +425,16 @@ entry:
 define void @test_args_nxv32i1(<vscale x 32 x i1> %a) {
   ; RV32-LABEL: name: test_args_nxv32i1
   ; RV32: bb.1.entry:
-  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT:   liveins: $v0
   ; RV32-NEXT: {{  $}}
-  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: test_args_nxv32i1
   ; RV64: bb.1.entry:
-  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT:   liveins: $v0
   ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
   ; RV64-NEXT:   PseudoRET
 entry:
   ret void
@@ -443,16 +443,16 @@ entry:
 define void @test_args_nxv16i1(<vscale x 16 x i1> %a) {
   ; RV32-LABEL: name: test_args_nxv16i1
   ; RV32: bb.1.entry:
-  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT:   liveins: $v0
   ; RV32-NEXT: {{  $}}
-  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: test_args_nxv16i1
   ; RV64: bb.1.entry:
-  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT:   liveins: $v0
   ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
   ; RV64-NEXT:   PseudoRET
 entry:
   ret void
@@ -461,16 +461,16 @@ entry:
 define void @test_args_nxv8i1(<vscale x 8 x i1> %a) {
   ; RV32-LABEL: name: test_args_nxv8i1
   ; RV32: bb.1.entry:
-  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT:   liveins: $v0
   ; RV32-NEXT: {{  $}}
-  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: test_args_nxv8i1
   ; RV64: bb.1.entry:
-  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT:   liveins: $v0
   ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
   ; RV64-NEXT:   PseudoRET
 entry:
   ret void
@@ -479,16 +479,16 @@ entry:
 define void @test_args_nxv4i1(<vscale x 4 x i1> %a) {
   ; RV32-LABEL: name: test_args_nxv4i1
   ; RV32: bb.1.entry:
-  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT:   liveins: $v0
   ; RV32-NEXT: {{  $}}
-  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: test_args_nxv4i1
   ; RV64: bb.1.entry:
-  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT:   liveins: $v0
   ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
   ; RV64-NEXT:   PseudoRET
 entry:
   ret void
@@ -497,16 +497,16 @@ entry:
 define void @test_args_nxv2i1(<vscale x 2 x i1> %a) {
   ; RV32-LABEL: name: test_args_nxv2i1
   ; RV32: bb.1.entry:
-  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT:   liveins: $v0
   ; RV32-NEXT: {{  $}}
-  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: test_args_nxv2i1
   ; RV64: bb.1.entry:
-  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT:   liveins: $v0
   ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
   ; RV64-NEXT:   PseudoRET
 entry:
   ret void
@@ -515,16 +515,16 @@ entry:
 define void @test_args_nxv1i1(<vscale x 1 x i1> %a) {
   ; RV32-LABEL: name: test_args_nxv1i1
   ; RV32: bb.1.entry:
-  ; RV32-NEXT:   liveins: $v8
+  ; RV32-NEXT:   liveins: $v0
   ; RV32-NEXT: {{  $}}
-  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
   ; RV32-NEXT:   PseudoRET
   ;
   ; RV64-LABEL: name: test_args_nxv1i1
   ; RV64: bb.1.entry:
-  ; RV64-NEXT:   liveins: $v8
+  ; RV64-NEXT:   liveins: $v0
   ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
   ; RV64-NEXT:   PseudoRET
 entry:
   ret void
@@ -907,3 +907,63 @@ define void @test_args_nxv32b16(<vscale x 32 x bfloat> %a) {
 entry:
   ret void
 }
+
+define void @test_args_nxv1i1_nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) {
+  ; RV32-LABEL: name: test_args_nxv1i1_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v0, $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i1_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v0, $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i1_nxv1i32(<vscale x 1 x i1> %a, <vscale x 1 x i32> %b) {
+  ; RV32-LABEL: name: test_args_nxv1i1_nxv1i32
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v0, $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i1_nxv1i32
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v0, $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define void @test_args_nxv1i32_nxv1i1(<vscale x 1 x i32> %a, <vscale x 1 x i1> %b) {
+  ; RV32-LABEL: name: test_args_nxv1i32_nxv1i1
+  ; RV32: bb.1.entry:
+  ; RV32-NEXT:   liveins: $v0, $v8
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: test_args_nxv1i32_nxv1i1
+  ; RV64: bb.1.entry:
+  ; RV64-NEXT:   liveins: $v0, $v8
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+  ; RV64-NEXT:   PseudoRET
+entry:
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
index eec9969063c87a..6385baa38aecfb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
@@ -364,14 +364,14 @@ define <vscale x 64 x i1> @test_ret_nxv64i1() {
   ; RV32-LABEL: name: test_ret_nxv64i1
   ; RV32: bb.1.entry:
   ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
-  ; RV32-NEXT:   PseudoRET implicit $v8
+  ; RV32-NEXT:   $v0 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
   ;
   ; RV64-LABEL: name: test_ret_nxv64i1
   ; RV64: bb.1.entry:
   ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
-  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 64 x s1>)
-  ; RV64-NEXT:   PseudoRET implicit $v8
+  ; RV64-NEXT:   $v0 = COPY [[DEF]](<vscale x 64 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
 entry:
   ret <vscale x 64 x i1> undef
 }
@@ -380,14 +380,14 @@ define <vscale x 32 x i1> @test_ret_nxv32i1() {
   ; RV32-LABEL: name: test_ret_nxv32i1
   ; RV32: bb.1.entry:
   ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
-  ; RV32-NEXT:   PseudoRET implicit $v8
+  ; RV32-NEXT:   $v0 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
   ;
   ; RV64-LABEL: name: test_ret_nxv32i1
   ; RV64: bb.1.entry:
   ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
-  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 32 x s1>)
-  ; RV64-NEXT:   PseudoRET implicit $v8
+  ; RV64-NEXT:   $v0 = COPY [[DEF]](<vscale x 32 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
 entry:
   ret <vscale x 32 x i1> undef
 }
@@ -396,14 +396,14 @@ define <vscale x 16 x i1> @test_ret_nxv16i1() {
   ; RV32-LABEL: name: test_ret_nxv16i1
   ; RV32: bb.1.entry:
   ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
-  ; RV32-NEXT:   PseudoRET implicit $v8
+  ; RV32-NEXT:   $v0 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
   ;
   ; RV64-LABEL: name: test_ret_nxv16i1
   ; RV64: bb.1.entry:
   ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
-  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 16 x s1>)
-  ; RV64-NEXT:   PseudoRET implicit $v8
+  ; RV64-NEXT:   $v0 = COPY [[DEF]](<vscale x 16 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
 entry:
   ret <vscale x 16 x i1> undef
 }
@@ -412,14 +412,14 @@ define <vscale x 8 x i1> @test_ret_nxv8i1() {
   ; RV32-LABEL: name: test_ret_nxv8i1
   ; RV32: bb.1.entry:
   ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
-  ; RV32-NEXT:   PseudoRET implicit $v8
+  ; RV32-NEXT:   $v0 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
   ;
   ; RV64-LABEL: name: test_ret_nxv8i1
   ; RV64: bb.1.entry:
   ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
-  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 8 x s1>)
-  ; RV64-NEXT:   PseudoRET implicit $v8
+  ; RV64-NEXT:   $v0 = COPY [[DEF]](<vscale x 8 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
 entry:
   ret <vscale x 8 x i1> undef
 }
@@ -428,14 +428,14 @@ define <vscale x 4 x i1> @test_ret_nxv4i1() {
   ; RV32-LABEL: name: test_ret_nxv4i1
   ; RV32: bb.1.entry:
   ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
-  ; RV32-NEXT:   PseudoRET implicit $v8
+  ; RV32-NEXT:   $v0 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
   ;
   ; RV64-LABEL: name: test_ret_nxv4i1
   ; RV64: bb.1.entry:
   ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
-  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 4 x s1>)
-  ; RV64-NEXT:   PseudoRET implicit $v8
+  ; RV64-NEXT:   $v0 = COPY [[DEF]](<vscale x 4 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
 entry:
   ret <vscale x 4 x i1> undef
 }
@@ -444,14 +444,14 @@ define <vscale x 2 x i1> @test_ret_nxv2i1() {
   ; RV32-LABEL: name: test_ret_nxv2i1
   ; RV32: bb.1.entry:
   ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
-  ; RV32-NEXT:   PseudoRET implicit $v8
+  ; RV32-NEXT:   $v0 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
   ;
   ; RV64-LABEL: name: test_ret_nxv2i1
   ; RV64: bb.1.entry:
   ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
-  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 2 x s1>)
-  ; RV64-NEXT:   PseudoRET implicit $v8
+  ; RV64-NEXT:   $v0 = COPY [[DEF]](<vscale x 2 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
 entry:
   ret <vscale x 2 x i1> undef
 }
@@ -460,14 +460,14 @@ define <vscale x 1 x i1> @test_ret_nxv1i1() {
   ; RV32-LABEL: name: test_ret_nxv1i1
   ; RV32: bb.1.entry:
   ; RV32-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-  ; RV32-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
-  ; RV32-NEXT:   PseudoRET implicit $v8
+  ; RV32-NEXT:   $v0 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV32-NEXT:   PseudoRET implicit $v0
   ;
   ; RV64-LABEL: name: test_ret_nxv1i1
   ; RV64: bb.1.entry:
   ; RV64-NEXT:   [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
-  ; RV64-NEXT:   $v8 = COPY [[DEF]](<vscale x 1 x s1>)
-  ; RV64-NEXT:   PseudoRET implicit $v8
+  ; RV64-NEXT:   $v0 = COPY [[DEF]](<vscale x 1 x s1>)
+  ; RV64-NEXT:   PseudoRET implicit $v0
 entry:
   ret <vscale x 1 x i1> undef
 }
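
A note on what the updated tests in vec-args.ll encode (not part of the
patch itself): only the first mask argument is assigned v0; every other
vector argument, including any further i1 vectors, is drawn from the
ordinary vector argument registers starting at v8. Below is a
self-contained toy model of that rule, with hypothetical names; it ignores
LMUL and register-group alignment, which the real CC code in
RISCVISelLowering.cpp handles:

  #include <cstdio>
  #include <optional>
  #include <vector>

  struct Arg { bool IsMask; }; // IsMask: scalable vector with i1 elements

  int main() {
    // Mirrors test_args_nxv1i32_nxv1i1, plus a second mask argument.
    std::vector<Arg> Args = {{false}, {true}, {true}};

    // Same detection the patch adds: record only the first mask argument.
    std::optional<unsigned> FirstMaskArgument;
    for (unsigned ValNo = 0; ValNo != Args.size(); ++ValNo)
      if (Args[ValNo].IsMask && !FirstMaskArgument)
        FirstMaskArgument = ValNo;

    unsigned NextVectorReg = 8; // v8 is the first normal vector arg register
    for (unsigned ValNo = 0; ValNo != Args.size(); ++ValNo) {
      if (FirstMaskArgument && ValNo == *FirstMaskArgument)
        std::printf("arg %u -> v0\n", ValNo); // mask argument takes v0
      else
        std::printf("arg %u -> v%u\n", ValNo, NextVectorReg++);
    }
    return 0;
  }

  // Prints: arg 0 -> v8, arg 1 -> v0, arg 2 -> v9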

From ab750c0eca658c23d1fcbd24e24d31ea3a0154c2 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 24 Jan 2024 10:38:38 -0800
Subject: [PATCH 2/2] !fixup clang-format

---
 llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 89ce5b393e692a..34b4d8e0a8692b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -60,8 +60,7 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
 
     if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
                       LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
-                      *Subtarget.getTargetLowering(),
-                      FirstMaskArgument))
+                      *Subtarget.getTargetLowering(), FirstMaskArgument))
       return true;
 
     StackSize = State.getStackSize();


