[llvm] 991ecfb - [RISCV][GlobalISel] Add lowerReturn for calling conv

Nitin John Raj via llvm-commits llvm-commits at lists.llvm.org
Tue May 23 11:28:56 PDT 2023


Author: Nitin John Raj
Date: 2023-05-23T11:24:54-07:00
New Revision: 991ecfb83daf6fa303385c708874d60f7ded92ab

URL: https://github.com/llvm/llvm-project/commit/991ecfb83daf6fa303385c708874d60f7ded92ab
DIFF: https://github.com/llvm/llvm-project/commit/991ecfb83daf6fa303385c708874d60f7ded92ab.diff

LOG: [RISCV][GlobalISel] Add lowerReturn for calling conv

Add minimal support to lower return, and introduce an OutgoingValueHandler and an OutgoingValueAssigner for returns.

Supports return values with integer, pointer and aggregate types.
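
For illustration, the simplest case this enables is a plain scalar return such as the function below, one of the cases covered by the new ret.ll test: RISCV::CC_RISCV assigns the value to $x10 and the PseudoRET picks it up as an implicit use. Return types outside the integer/pointer/aggregate set still make lowerReturnVal return false, so such functions continue to fail GlobalISel translation (aborting or falling back to SelectionDAG depending on -global-isel-abort).

define i32 @test_ret_i32() {
entry:
  ret i32 1
}

; Resulting RV32 MIR, per the RV32I check lines in the added test:
;   %0:_(s32) = G_CONSTANT i32 1
;   $x10 = COPY %0(s32)
;   PseudoRET implicit $x10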

(Update of D69808 - avoiding commandeering that revision)

Co-authored-by: lewis-revill

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D117318

Added: 
    llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll

Modified: 
    llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
    llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index d265f3a12b7ff..7b39b1c9444d4 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -14,22 +14,115 @@
 
 #include "RISCVCallLowering.h"
 #include "RISCVISelLowering.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 
 using namespace llvm;
 
+namespace {
+
+struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
+private:
+  // The function used internally to assign args - we ignore the AssignFn stored
+  // by OutgoingValueAssigner since RISC-V implements its CC using a custom
+  // function with a different signature.
+  RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;
+
+  // Whether this is assigning args for a return.
+  bool IsRet;
+
+public:
+  RISCVOutgoingValueAssigner(
+      RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
+      : CallLowering::OutgoingValueAssigner(nullptr),
+        RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
+
+  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
+                 CCValAssign::LocInfo LocInfo,
+                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+                 CCState &State) override {
+    MachineFunction &MF = State.getMachineFunction();
+    const DataLayout &DL = MF.getDataLayout();
+    const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+
+    return RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
+                         LocInfo, Flags, State, /*IsFixed=*/true, IsRet,
+                         Info.Ty, *Subtarget.getTargetLowering(),
+                         /*FirstMaskArgument=*/std::nullopt);
+  }
+};
+
+struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
+  RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
+                            MachineInstrBuilder MIB)
+      : OutgoingValueHandler(B, MRI), MIB(MIB) {}
+
+  MachineInstrBuilder MIB;
+
+  Register getStackAddress(uint64_t MemSize, int64_t Offset,
+                           MachinePointerInfo &MPO,
+                           ISD::ArgFlagsTy Flags) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
+                            MachinePointerInfo &MPO, CCValAssign &VA) override {
+    llvm_unreachable("not implemented");
+  }
+
+  void assignValueToReg(Register ValVReg, Register PhysReg,
+                        CCValAssign VA) override {
+    Register ExtReg = extendRegister(ValVReg, VA);
+    MIRBuilder.buildCopy(PhysReg, ExtReg);
+    MIB.addUse(PhysReg, RegState::Implicit);
+  }
+};
+
+} // namespace
+
 RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
     : CallLowering(&TLI) {}
 
+bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
+                                       const Value *Val,
+                                       ArrayRef<Register> VRegs,
+                                       MachineInstrBuilder &Ret) const {
+  if (!Val)
+    return true;
+
+  // TODO: Only integer, pointer and aggregate types are supported now.
+  if (!Val->getType()->isIntOrPtrTy() && !Val->getType()->isAggregateType())
+    return false;
+
+  MachineFunction &MF = MIRBuilder.getMF();
+  const DataLayout &DL = MF.getDataLayout();
+  const Function &F = MF.getFunction();
+  CallingConv::ID CC = F.getCallingConv();
+
+  ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
+  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
+
+  SmallVector<ArgInfo, 4> SplitRetInfos;
+  splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);
+
+  RISCVOutgoingValueAssigner Assigner(
+      CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
+      /*IsRet=*/true);
+  RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
+  return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
+                                       MIRBuilder, CC, F.isVarArg());
+}
+
 bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {
-
+  assert(!Val == VRegs.empty() && "Return value without a vreg");
   MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::PseudoRET);
 
-  if (Val != nullptr) {
+  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
     return false;
-  }
+
   MIRBuilder.insertInstr(Ret);
   return true;
 }

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
index cd7fc4c76123f..d80a666f34894 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
@@ -16,10 +16,11 @@
 
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
-#include "llvm/CodeGen/ValueTypes.h"
 
 namespace llvm {
 
+class MachineInstrBuilder;
+class MachineIRBuilder;
 class RISCVTargetLowering;
 
 class RISCVCallLowering : public CallLowering {
@@ -37,6 +38,10 @@ class RISCVCallLowering : public CallLowering {
 
   bool lowerCall(MachineIRBuilder &MIRBuilder,
                  CallLoweringInfo &Info) const override;
+
+private:
+  bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
+                      ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;
 };
 
 } // end namespace llvm

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 87784be54ad44..766356b07bb2d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13722,7 +13722,7 @@ static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
 }
 
 // Implements the RISC-V calling convention. Returns true upon failure.
-static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
@@ -14175,7 +14175,7 @@ static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
 
 // FastCC has less than 1% performance improvement for some particular
 // benchmark. But theoretically, it may has benenfit for some cases.
-static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
+bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
                             unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State,
@@ -14277,7 +14277,7 @@ static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
   return true; // CC didn't match.
 }
 
-static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+bool RISCV::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
 
@@ -14371,11 +14371,11 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 
   if (CallConv == CallingConv::GHC)
-    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
+    CCInfo.AnalyzeFormalArguments(Ins, RISCV::CC_RISCV_GHC);
   else
     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
-                     CallConv == CallingConv::Fast ? CC_RISCV_FastCC
-                                                   : CC_RISCV);
+                     CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
+                                                   : RISCV::CC_RISCV);
 
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
@@ -14576,11 +14576,11 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
 
   if (CallConv == CallingConv::GHC)
-    ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
+    ArgCCInfo.AnalyzeCallOperands(Outs, RISCV::CC_RISCV_GHC);
   else
     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
-                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
-                                                    : CC_RISCV);
+                      CallConv == CallingConv::Fast ? RISCV::CC_RISCV_FastCC
+                                                    : RISCV::CC_RISCV);
 
   // Check if it's really possible to do a tail call.
   if (IsTailCall)
@@ -14824,7 +14824,7 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RVLocs;
   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
-  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
+  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, RISCV::CC_RISCV);
 
   // Copy all of the result registers out of their specified physreg.
   for (auto &VA : RVLocs) {
@@ -14867,7 +14867,7 @@ bool RISCVTargetLowering::CanLowerReturn(
     MVT VT = Outs[i].VT;
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
-    if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
+    if (RISCV::CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
                  *this, FirstMaskArgument))
       return false;
@@ -14892,7 +14892,7 @@ RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                  *DAG.getContext());
 
   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
-                    nullptr, CC_RISCV);
+                    nullptr, RISCV::CC_RISCV);
 
   if (CallConv == CallingConv::GHC && !RVLocs.empty())
     report_fatal_error("GHC functions return void only");

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 6bf3a811b2668..a8c1917c2b52b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -735,7 +735,6 @@ class RISCVTargetLowering : public TargetLowering {
   bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                              unsigned Factor) const override;
 
-private:
   /// RISCVCCAssignFn - This target-specific function extends the default
   /// CCValAssign with additional information used to lower RISC-V calling
   /// conventions.
@@ -747,6 +746,7 @@ class RISCVTargetLowering : public TargetLowering {
                                const RISCVTargetLowering &TLI,
                                std::optional<unsigned> FirstMaskArgument);
 
+private:
   void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                         RISCVCCAssignFn Fn) const;
@@ -874,6 +874,26 @@ class RISCVTargetLowering : public TargetLowering {
   /// faster than two FDIVs.
   unsigned combineRepeatedFPDivisors() const override;
 };
+
+namespace RISCV {
+
+bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+              MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+              ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+              bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+              std::optional<unsigned> FirstMaskArgument);
+
+bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
+                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
+                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
+                     bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
+                     std::optional<unsigned> FirstMaskArgument);
+
+bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
+                  CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
+                  CCState &State);
+} // end namespace RISCV
+
 namespace RISCVVIntrinsicsTable {
 
 struct RISCVVIntrinsicInfo {

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
new file mode 100644
index 0000000000000..1cd43f4f89b00
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+define void @test_ret_void() {
+  ; RV32I-LABEL: name: test_ret_void
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   PseudoRET
+  ; RV64I-LABEL: name: test_ret_void
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   PseudoRET
+entry:
+  ret void
+}
+
+define i8 @test_ret_i8() {
+  ; RV32I-LABEL: name: test_ret_i8
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
+  ; RV32I-NEXT:   $x10 = COPY [[ANYEXT]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i8
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s8)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i8 1
+}
+
+define zeroext i8 @test_ret_i8_zext() {
+  ; RV32I-LABEL: name: test_ret_i8_zext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV32I-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s8)
+  ; RV32I-NEXT:   $x10 = COPY [[ZEXT]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i8_zext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+  ; RV64I-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s8)
+  ; RV64I-NEXT:   $x10 = COPY [[ZEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i8 1
+}
+
+define signext i16 @test_ret_i16_sext() {
+  ; RV32I-LABEL: name: test_ret_i16_sext
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV32I-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[C]](s16)
+  ; RV32I-NEXT:   $x10 = COPY [[SEXT]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i16_sext
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
+  ; RV64I-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[C]](s16)
+  ; RV64I-NEXT:   $x10 = COPY [[SEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i16 1
+}
+
+define i32 @test_ret_i32() {
+  ; RV32I-LABEL: name: test_ret_i32
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_i32
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i32 1
+}
+
+define i64 @test_ret_i64() {
+  ; RV32I-LABEL: name: test_ret_i64
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967296
+  ; RV32I-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
+  ; RV32I-NEXT:   $x10 = COPY [[UV]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[UV1]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ; RV64I-LABEL: name: test_ret_i64
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967296
+  ; RV64I-NEXT:   $x10 = COPY [[C]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret i64 4294967296
+}
+
+define ptr @test_ret_ptr() {
+  ; RV32I-LABEL: name: test_ret_ptr
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV32I-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV32I-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: test_ret_ptr
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
+  ; RV64I-NEXT:   $x10 = COPY [[DEF]](p0)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+entry:
+  ret ptr undef
+}
+
+define [2 x i32] @test_ret_2xi32() {
+  ; RV32I-LABEL: name: test_ret_2xi32
+  ; RV32I: bb.1.entry:
+  ; RV32I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV32I-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV32I-NEXT:   $x10 = COPY [[C]](s32)
+  ; RV32I-NEXT:   $x11 = COPY [[C1]](s32)
+  ; RV32I-NEXT:   PseudoRET implicit $x10, implicit $x11
+  ; RV64I-LABEL: name: test_ret_2xi32
+  ; RV64I: bb.1.entry:
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; RV64I-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; RV64I-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   $x11 = COPY [[ANYEXT1]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10, implicit $x11
+entry:
+  ret [2 x i32] [i32 1, i32 2]
+}


        

