[llvm] 3967510 - [RISCV][GISel] First mask argument placed in v0 according to RISCV Vector CC (#79343)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 24 13:03:42 PST 2024
Author: Michael Maitland
Date: 2024-01-24T16:03:38-05:00
New Revision: 3967510032bc50062128e9ec078d930d7d5151ce
URL: https://github.com/llvm/llvm-project/commit/3967510032bc50062128e9ec078d930d7d5151ce
DIFF: https://github.com/llvm/llvm-project/commit/3967510032bc50062128e9ec078d930d7d5151ce.diff
LOG: [RISCV][GISel] First mask argument placed in v0 according to RISCV Vector CC. (#79343)
Added:
Modified:
llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 26eac17ed24c9f3..45e19cdea300b19 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -34,6 +34,9 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
// Whether this is assigning args for a return.
bool IsRet;
+ // true if assignArg has been called for a mask argument, false otherwise.
+ bool AssignedFirstMaskArg = false;
+
public:
RISCVOutgoingValueAssigner(
RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
@@ -48,10 +51,16 @@ struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
const DataLayout &DL = MF.getDataLayout();
const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+ std::optional<unsigned> FirstMaskArgument;
+ if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+ ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+ FirstMaskArgument = ValNo;
+ AssignedFirstMaskArg = true;
+ }
+
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
- *Subtarget.getTargetLowering(),
- /*FirstMaskArgument=*/std::nullopt))
+ *Subtarget.getTargetLowering(), FirstMaskArgument))
return true;
StackSize = State.getStackSize();
@@ -172,6 +181,9 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
// Whether this is assigning args from a return.
bool IsRet;
+ // true if assignArg has been called for a mask argument, false otherwise.
+ bool AssignedFirstMaskArg = false;
+
public:
RISCVIncomingValueAssigner(
RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
@@ -189,10 +201,16 @@ struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
if (LocVT.isScalableVector())
MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
+ std::optional<unsigned> FirstMaskArgument;
+ if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
+ ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
+ FirstMaskArgument = ValNo;
+ AssignedFirstMaskArg = true;
+ }
+
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
- *Subtarget.getTargetLowering(),
- /*FirstMaskArgument=*/std::nullopt))
+ *Subtarget.getTargetLowering(), FirstMaskArgument))
return true;
StackSize = State.getStackSize();
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
index 4df0a8f48cc8d0b..3c4cfaef4d5841d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-args.ll
@@ -407,16 +407,16 @@ entry:
define void @test_args_nxv64i1(<vscale x 64 x i1> %a) {
; RV32-LABEL: name: test_args_nxv64i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv64i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 64 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -425,16 +425,16 @@ entry:
define void @test_args_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-LABEL: name: test_args_nxv32i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv32i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 32 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -443,16 +443,16 @@ entry:
define void @test_args_nxv16i1(<vscale x 16 x i1> %a) {
; RV32-LABEL: name: test_args_nxv16i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv16i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 16 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -461,16 +461,16 @@ entry:
define void @test_args_nxv8i1(<vscale x 8 x i1> %a) {
; RV32-LABEL: name: test_args_nxv8i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv8i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 8 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -479,16 +479,16 @@ entry:
define void @test_args_nxv4i1(<vscale x 4 x i1> %a) {
; RV32-LABEL: name: test_args_nxv4i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv4i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 4 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -497,16 +497,16 @@ entry:
define void @test_args_nxv2i1(<vscale x 2 x i1> %a) {
; RV32-LABEL: name: test_args_nxv2i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv2i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -515,16 +515,16 @@ entry:
define void @test_args_nxv1i1(<vscale x 1 x i1> %a) {
; RV32-LABEL: name: test_args_nxv1i1
; RV32: bb.1.entry:
- ; RV32-NEXT: liveins: $v8
+ ; RV32-NEXT: liveins: $v0
; RV32-NEXT: {{ $}}
- ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
; RV32-NEXT: PseudoRET
;
; RV64-LABEL: name: test_args_nxv1i1
; RV64: bb.1.entry:
- ; RV64-NEXT: liveins: $v8
+ ; RV64-NEXT: liveins: $v0
; RV64-NEXT: {{ $}}
- ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
; RV64-NEXT: PseudoRET
entry:
ret void
@@ -907,3 +907,63 @@ define void @test_args_nxv32b16(<vscale x 32 x bfloat> %a) {
entry:
ret void
}
+
+define void @test_args_nxv1i1_nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) {
+ ; RV32-LABEL: name: test_args_nxv1i1_nxv1i1
+ ; RV32: bb.1.entry:
+ ; RV32-NEXT: liveins: $v0, $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: test_args_nxv1i1_nxv1i1
+ ; RV64: bb.1.entry:
+ ; RV64-NEXT: liveins: $v0, $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v8
+ ; RV64-NEXT: PseudoRET
+entry:
+ ret void
+}
+
+define void @test_args_nxv1i1_nxv1i32(<vscale x 1 x i1> %a, <vscale x 1 x i32> %b) {
+ ; RV32-LABEL: name: test_args_nxv1i1_nxv1i32
+ ; RV32: bb.1.entry:
+ ; RV32-NEXT: liveins: $v0, $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: test_args_nxv1i1_nxv1i32
+ ; RV64: bb.1.entry:
+ ; RV64-NEXT: liveins: $v0, $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: PseudoRET
+entry:
+ ret void
+}
+
+define void @test_args_nxv1i32_nxv1i1(<vscale x 1 x i32> %a, <vscale x 1 x i1> %b) {
+ ; RV32-LABEL: name: test_args_nxv1i32_nxv1i1
+ ; RV32: bb.1.entry:
+ ; RV32-NEXT: liveins: $v0, $v8
+ ; RV32-NEXT: {{ $}}
+ ; RV32-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV32-NEXT: PseudoRET
+ ;
+ ; RV64-LABEL: name: test_args_nxv1i32_nxv1i1
+ ; RV64: bb.1.entry:
+ ; RV64-NEXT: liveins: $v0, $v8
+ ; RV64-NEXT: {{ $}}
+ ; RV64-NEXT: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s1>) = COPY $v0
+ ; RV64-NEXT: PseudoRET
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
index eec9969063c87a5..6385baa38aecfb3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
@@ -364,14 +364,14 @@ define <vscale x 64 x i1> @test_ret_nxv64i1() {
; RV32-LABEL: name: test_ret_nxv64i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 64 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 64 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv64i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 64 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 64 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 64 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 64 x i1> undef
}
@@ -380,14 +380,14 @@ define <vscale x 32 x i1> @test_ret_nxv32i1() {
; RV32-LABEL: name: test_ret_nxv32i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 32 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 32 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv32i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 32 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 32 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 32 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 32 x i1> undef
}
@@ -396,14 +396,14 @@ define <vscale x 16 x i1> @test_ret_nxv16i1() {
; RV32-LABEL: name: test_ret_nxv16i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 16 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 16 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv16i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 16 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 16 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 16 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 16 x i1> undef
}
@@ -412,14 +412,14 @@ define <vscale x 8 x i1> @test_ret_nxv8i1() {
; RV32-LABEL: name: test_ret_nxv8i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 8 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv8i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 8 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 8 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 8 x i1> undef
}
@@ -428,14 +428,14 @@ define <vscale x 4 x i1> @test_ret_nxv4i1() {
; RV32-LABEL: name: test_ret_nxv4i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 4 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv4i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 4 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 4 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 4 x i1> undef
}
@@ -444,14 +444,14 @@ define <vscale x 2 x i1> @test_ret_nxv2i1() {
; RV32-LABEL: name: test_ret_nxv2i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 2 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv2i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 2 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 2 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 2 x i1> undef
}
@@ -460,14 +460,14 @@ define <vscale x 1 x i1> @test_ret_nxv1i1() {
; RV32-LABEL: name: test_ret_nxv1i1
; RV32: bb.1.entry:
; RV32-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV32-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s1>)
- ; RV32-NEXT: PseudoRET implicit $v8
+ ; RV32-NEXT: $v0 = COPY [[DEF]](<vscale x 1 x s1>)
+ ; RV32-NEXT: PseudoRET implicit $v0
;
; RV64-LABEL: name: test_ret_nxv1i1
; RV64: bb.1.entry:
; RV64-NEXT: [[DEF:%[0-9]+]]:_(<vscale x 1 x s1>) = G_IMPLICIT_DEF
- ; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s1>)
- ; RV64-NEXT: PseudoRET implicit $v8
+ ; RV64-NEXT: $v0 = COPY [[DEF]](<vscale x 1 x s1>)
+ ; RV64-NEXT: PseudoRET implicit $v0
entry:
ret <vscale x 1 x i1> undef
}
More information about the llvm-commits
mailing list