[llvm] [RISCV][GISel] Use CCValAssign::getCustomReg for converting f16/f32<->GPR. (PR #105700)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 22 10:08:23 PDT 2024


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/105700

This gives us much better control of the generated code for GISel. I've added G_ versions of our SelectionDAG ISD nodes to make it better match what SelectionDAG generates. This prevents generating copies between register classes of different sizes.

SelectionDAG now checks needsCustom() instead of detecting the special cases in the Bitcast handler.

Unfortunately, IRTranslator for bitcast still generates copies between register classes of different sizes. Because of this we can't handle i16<->f16 bitcasts without crashing. Not sure if I should teach RISCVInstrInfo::copyPhysReg to allow copies between FPR16 and GPR or if I should convert the copies to instructions in GISel.

From e5b03343cfa142cb96a43127639f1efe5eab3651 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 19 Aug 2024 20:37:01 -0700
Subject: [PATCH] [RISCV][GISel] Use CCValAssign::getCustomReg for converting
 f16/f32<->GPR.

This gives us much better control of the generated code for GISel.
I've added G_ versions of our SelectionDAG ISD nodes to make it better
match what SelectionDAG generates. This prevents generating copies between
register classes of different sizes.

SelectionDAG now checks needsCustom() instead of detecting the special
cases in the Bitcast handler.

Unfortunately, IRTranslator for bitcast still generates copies between
register classes of different sizes. Because of this we can't handle
i16<->f16 bitcasts without crashing. Not sure if I should teach
RISCVInstrInfo::copyPhysReg to allow copies between FPR16 and GPR or
if I should convert the copies to instructions in GISel.
---
 .../Target/RISCV/GISel/RISCVCallLowering.cpp  | 68 ++++++++++----
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |  6 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 60 +++++++-----
 llvm/lib/Target/RISCV/RISCVInstrGISel.td      | 28 ++++++
 .../irtranslator/calling-conv-half.ll         | 29 +++---
 .../irtranslator/calling-conv-lp64.ll         | 92 ++++++++++++-------
 6 files changed, 196 insertions(+), 87 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index b274a8fc45c5ce..84e6659f3fc994 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -103,15 +103,6 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
 
   void assignValueToReg(Register ValVReg, Register PhysReg,
                         const CCValAssign &VA) override {
-    // If we're passing a smaller fp value into a larger integer register,
-    // anyextend before copying.
-    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
-        ((VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::i64) &&
-         VA.getValVT() == MVT::f16)) {
-      LLT DstTy = LLT::scalar(VA.getLocVT().getSizeInBits());
-      ValVReg = MIRBuilder.buildAnyExt(DstTy, ValVReg).getReg(0);
-    }
-
     Register ExtReg = extendRegister(ValVReg, VA);
     MIRBuilder.buildCopy(PhysReg, ExtReg);
     MIB.addUse(PhysReg, RegState::Implicit);
@@ -120,16 +111,40 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
   unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                              ArrayRef<CCValAssign> VAs,
                              std::function<void()> *Thunk) override {
+    const CCValAssign &VA = VAs[0];
+    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
+        (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
+      Register PhysReg = VA.getLocReg();
+
+      LLT LocTy(VA.getLocVT());
+      unsigned Opc = VA.getValVT() == MVT::f32 ? RISCV::G_FMV_X_ANYEXTW_RV64
+                                               : RISCV::G_FMV_X_ANYEXTH;
+
+      auto assignFunc = [=]() {
+        auto Fmv = MIRBuilder.buildInstr(Opc, {LocTy}, {Arg.Regs[0]});
+        Register NewReg = Fmv.getReg(0);
+        MIRBuilder.buildCopy(PhysReg, NewReg);
+        MIB.addUse(PhysReg, RegState::Implicit);
+      };
+
+      if (Thunk) {
+        *Thunk = assignFunc;
+        return 1;
+      }
+
+      assignFunc();
+      return 1;
+    }
+
     assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
-    const CCValAssign &VALo = VAs[0];
     const CCValAssign &VAHi = VAs[1];
 
     assert(VAHi.needsCustom() && "Value doesn't need custom handling");
-    assert(VALo.getValNo() == VAHi.getValNo() &&
+    assert(VA.getValNo() == VAHi.getValNo() &&
            "Values belong to different arguments");
 
-    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
-           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
+    assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
+           VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
            "unexpected custom value");
 
     Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
@@ -148,7 +163,7 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
     }
 
     auto assignFunc = [=]() {
-      assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
+      assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
       if (VAHi.isRegLoc())
         assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
     };
@@ -246,16 +261,31 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
   unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
                              ArrayRef<CCValAssign> VAs,
                              std::function<void()> *Thunk) override {
+    const CCValAssign &VA = VAs[0];
+    if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
+        (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
+      Register PhysReg = VA.getLocReg();
+
+      markPhysRegUsed(PhysReg);
+
+      LLT LocTy(VA.getLocVT());
+      auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
+
+      unsigned Opc =
+          VA.getValVT() == MVT::f32 ? RISCV::G_FMV_W_X_RV64 : RISCV::G_FMV_H_X;
+      MIRBuilder.buildInstr(Opc, {Arg.Regs[0]}, {Copy.getReg(0)});
+      return 1;
+    }
+
     assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
-    const CCValAssign &VALo = VAs[0];
     const CCValAssign &VAHi = VAs[1];
 
     assert(VAHi.needsCustom() && "Value doesn't need custom handling");
-    assert(VALo.getValNo() == VAHi.getValNo() &&
+    assert(VA.getValNo() == VAHi.getValNo() &&
            "Values belong to different arguments");
 
-    assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
-           VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
+    assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
+           VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
            "unexpected custom value");
 
     Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
@@ -272,7 +302,7 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
                            const_cast<CCValAssign &>(VAHi));
     }
 
-    assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
+    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
     if (VAHi.isRegLoc())
       assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 5369be24f0e7cb..7c6b7f69b0ec5b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -434,6 +434,8 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   }
   case TargetOpcode::G_FPTOSI:
   case TargetOpcode::G_FPTOUI:
+  case RISCV::G_FMV_X_ANYEXTW_RV64:
+  case RISCV::G_FMV_X_ANYEXTH:
   case RISCV::G_FCLASS: {
     LLT Ty = MRI.getType(MI.getOperand(1).getReg());
     OpdsMapping[0] = GPRValueMapping;
@@ -441,7 +443,9 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     break;
   }
   case TargetOpcode::G_SITOFP:
-  case TargetOpcode::G_UITOFP: {
+  case TargetOpcode::G_UITOFP:
+  case RISCV::G_FMV_W_X_RV64:
+  case RISCV::G_FMV_H_X: {
     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
     OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
     OpdsMapping[1] = GPRValueMapping;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 670dee2edb1dfb..6120336ea50675 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18672,6 +18672,19 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
   // similar local variables rather than directly checking against the target
   // ABI.
 
+  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);
+
+  if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::bf16 ||
+                           (ValVT == MVT::f32 && XLen == 64))) {
+    Register Reg = State.AllocateReg(ArgGPRs);
+    if (Reg) {
+      LocVT = XLenVT;
+      State.addLoc(
+          CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+      return false;
+    }
+  }
+
   if (UseGPRForF16_F32 &&
       (ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
     LocVT = XLenVT;
@@ -18681,8 +18694,6 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
     LocInfo = CCValAssign::BCvt;
   }
 
-  ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);
-
   // If this is a variadic argument, the RISC-V calling convention requires
   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
@@ -18938,6 +18949,17 @@ void RISCVTargetLowering::analyzeOutputArgs(
 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
                                    const CCValAssign &VA, const SDLoc &DL,
                                    const RISCVSubtarget &Subtarget) {
+  if (VA.needsCustom()) {
+    if (VA.getLocVT().isInteger() &&
+        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
+      Val = DAG.getNode(RISCVISD::FMV_H_X, DL, VA.getValVT(), Val);
+    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
+      Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
+    else
+      llvm_unreachable("Unexpected Custom handling.");
+    return Val;
+  }
+
   switch (VA.getLocInfo()) {
   default:
     llvm_unreachable("Unexpected CCValAssign::LocInfo");
@@ -18946,14 +18968,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
     break;
   case CCValAssign::BCvt:
-    if (VA.getLocVT().isInteger() &&
-        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
-      Val = DAG.getNode(RISCVISD::FMV_H_X, DL, VA.getValVT(), Val);
-    } else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
-      Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
-    } else {
-      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
-    }
+    Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
     break;
   }
   return Val;
@@ -18999,6 +19014,17 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
                                    const RISCVSubtarget &Subtarget) {
   EVT LocVT = VA.getLocVT();
 
+  if (VA.needsCustom()) {
+    if (LocVT.isInteger() &&
+        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
+      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, LocVT, Val);
+    else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32)
+      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
+    else
+      llvm_unreachable("Unexpected Custom handling.");
+    return Val;
+  }
+
   switch (VA.getLocInfo()) {
   default:
     llvm_unreachable("Unexpected CCValAssign::LocInfo");
@@ -19007,14 +19033,7 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
     break;
   case CCValAssign::BCvt:
-    if (LocVT.isInteger() &&
-        (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
-      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, LocVT, Val);
-    } else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32) {
-      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
-    } else {
-      Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
-    }
+    Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
     break;
   }
   return Val;
@@ -19793,9 +19812,8 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
       Glue = RetValue2.getValue(2);
       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
                              RetValue2);
-    }
-
-    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
+    } else
+      RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
 
     InVals.push_back(RetValue);
   }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrGISel.td b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
index ba40662c49c1df..ca58daa793b75b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrGISel.td
@@ -17,6 +17,34 @@ class RISCVGenericInstruction : GenericInstruction {
   let Namespace = "RISCV";
 }
 
+def G_FMV_X_ANYEXTW_RV64 : RISCVGenericInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_FMV_X_ANYEXTW_RV64, riscv_fmv_x_anyextw_rv64>;
+
+def G_FMV_X_ANYEXTH : RISCVGenericInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_FMV_X_ANYEXTH, riscv_fmv_x_anyexth>;
+
+def G_FMV_W_X_RV64 : RISCVGenericInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_FMV_W_X_RV64, riscv_fmv_w_x_rv64>;
+
+def G_FMV_H_X : RISCVGenericInstruction {
+  let OutOperandList = (outs unknown:$dst);
+  let InOperandList = (ins unknown:$src);
+  let hasSideEffects = false;
+}
+def : GINodeEquiv<G_FMV_H_X, riscv_fmv_h_x>;
+
 // Pseudo equivalent to a RISCVISD::FCLASS.
 def G_FCLASS : RISCVGenericInstruction {
   let OutOperandList = (outs type0:$dst);
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
index 04fa62b1950763..de1f005dcb841d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
@@ -780,8 +780,8 @@ define half @callee_half_return_stack2(half %v1, half %v2, half %v3, half %v4, h
   ; RV32IZFH-NEXT:   [[COPY6:%[0-9]+]]:_(s16) = COPY $f16_h
   ; RV32IZFH-NEXT:   [[COPY7:%[0-9]+]]:_(s16) = COPY $f17_h
   ; RV32IZFH-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $x10
-  ; RV32IZFH-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
-  ; RV32IZFH-NEXT:   $f10_h = COPY [[TRUNC]](s16)
+  ; RV32IZFH-NEXT:   [[FMV_H_X:%[0-9]+]]:_(s16) = G_FMV_H_X [[COPY8]](s32)
+  ; RV32IZFH-NEXT:   $f10_h = COPY [[FMV_H_X]](s16)
   ; RV32IZFH-NEXT:   PseudoRET implicit $f10_h
   ;
   ; RV64I-LABEL: name: callee_half_return_stack2
@@ -832,8 +832,8 @@ define half @callee_half_return_stack2(half %v1, half %v2, half %v3, half %v4, h
   ; RV64IF-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $f17_f
   ; RV64IF-NEXT:   [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[COPY7]](s32)
   ; RV64IF-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
-  ; RV64IF-NEXT:   [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s64)
-  ; RV64IF-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s16)
+  ; RV64IF-NEXT:   [[FMV_W_X_RV64_:%[0-9]+]]:_(s16) = G_FMV_W_X_RV64 [[COPY8]](s64)
+  ; RV64IF-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMV_W_X_RV64_]](s16)
   ; RV64IF-NEXT:   $f10_f = COPY [[ANYEXT]](s32)
   ; RV64IF-NEXT:   PseudoRET implicit $f10_f
   ;
@@ -850,8 +850,8 @@ define half @callee_half_return_stack2(half %v1, half %v2, half %v3, half %v4, h
   ; RV64IZFH-NEXT:   [[COPY6:%[0-9]+]]:_(s16) = COPY $f16_h
   ; RV64IZFH-NEXT:   [[COPY7:%[0-9]+]]:_(s16) = COPY $f17_h
   ; RV64IZFH-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY $x10
-  ; RV64IZFH-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s64)
-  ; RV64IZFH-NEXT:   $f10_h = COPY [[TRUNC]](s16)
+  ; RV64IZFH-NEXT:   [[FMV_H_X:%[0-9]+]]:_(s16) = G_FMV_H_X [[COPY8]](s64)
+  ; RV64IZFH-NEXT:   $f10_h = COPY [[FMV_H_X]](s16)
   ; RV64IZFH-NEXT:   PseudoRET implicit $f10_h
   ret half %x
 }
@@ -951,8 +951,8 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
   ; RV32IZFH-NEXT:   $f15_h = COPY [[COPY1]](s16)
   ; RV32IZFH-NEXT:   $f16_h = COPY [[COPY1]](s16)
   ; RV32IZFH-NEXT:   $f17_h = COPY [[COPY1]](s16)
-  ; RV32IZFH-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
-  ; RV32IZFH-NEXT:   $x10 = COPY [[ANYEXT]](s32)
+  ; RV32IZFH-NEXT:   [[FMV_X_ANYEXTH:%[0-9]+]]:_(s32) = G_FMV_X_ANYEXTH [[COPY]](s16)
+  ; RV32IZFH-NEXT:   $x10 = COPY [[FMV_X_ANYEXTH]](s32)
   ; RV32IZFH-NEXT:   PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_h, implicit $f11_h, implicit $f12_h, implicit $f13_h, implicit $f14_h, implicit $f15_h, implicit $f16_h, implicit $f17_h, implicit $x10, implicit-def $f10_h
   ; RV32IZFH-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
   ; RV32IZFH-NEXT:   [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
@@ -1018,7 +1018,6 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
   ; RV64IF-NEXT:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
   ; RV64IF-NEXT:   [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
   ; RV64IF-NEXT:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
-  ; RV64IF-NEXT:   [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
   ; RV64IF-NEXT:   $f10_f = COPY [[ANYEXT]](s32)
   ; RV64IF-NEXT:   $f11_f = COPY [[ANYEXT1]](s32)
   ; RV64IF-NEXT:   $f12_f = COPY [[ANYEXT2]](s32)
@@ -1027,14 +1026,14 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
   ; RV64IF-NEXT:   $f15_f = COPY [[ANYEXT5]](s32)
   ; RV64IF-NEXT:   $f16_f = COPY [[ANYEXT6]](s32)
   ; RV64IF-NEXT:   $f17_f = COPY [[ANYEXT7]](s32)
-  ; RV64IF-NEXT:   [[ANYEXT9:%[0-9]+]]:_(s64) = G_ANYEXT [[ANYEXT8]](s32)
-  ; RV64IF-NEXT:   $x10 = COPY [[ANYEXT9]](s64)
+  ; RV64IF-NEXT:   [[FMV_X_ANYEXTW_RV64_:%[0-9]+]]:_(s64) = G_FMV_X_ANYEXTW_RV64 [[TRUNC]](s16)
+  ; RV64IF-NEXT:   $x10 = COPY [[FMV_X_ANYEXTW_RV64_]](s64)
   ; RV64IF-NEXT:   PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
   ; RV64IF-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
   ; RV64IF-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
   ; RV64IF-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-  ; RV64IF-NEXT:   [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
-  ; RV64IF-NEXT:   $f10_f = COPY [[ANYEXT10]](s32)
+  ; RV64IF-NEXT:   [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+  ; RV64IF-NEXT:   $f10_f = COPY [[ANYEXT8]](s32)
   ; RV64IF-NEXT:   PseudoRET implicit $f10_f
   ;
   ; RV64IZFH-LABEL: name: caller_half_return_stack2
@@ -1054,8 +1053,8 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
   ; RV64IZFH-NEXT:   $f15_h = COPY [[COPY1]](s16)
   ; RV64IZFH-NEXT:   $f16_h = COPY [[COPY1]](s16)
   ; RV64IZFH-NEXT:   $f17_h = COPY [[COPY1]](s16)
-  ; RV64IZFH-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s16)
-  ; RV64IZFH-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64IZFH-NEXT:   [[FMV_X_ANYEXTH:%[0-9]+]]:_(s64) = G_FMV_X_ANYEXTH [[COPY]](s16)
+  ; RV64IZFH-NEXT:   $x10 = COPY [[FMV_X_ANYEXTH]](s64)
   ; RV64IZFH-NEXT:   PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_h, implicit $f11_h, implicit $f12_h, implicit $f13_h, implicit $f14_h, implicit $f15_h, implicit $f16_h, implicit $f17_h, implicit $x10, implicit-def $f10_h
   ; RV64IZFH-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
   ; RV64IZFH-NEXT:   [[COPY2:%[0-9]+]]:_(s16) = COPY $f10_h
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
index 9283f1f090ed55..bbcb5bea17ab1a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
 ; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator \
 ; RUN:    -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV64,RV64I %s
+; RUN:   | FileCheck -check-prefixes=RV64I %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 \
 ; RUN:    -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
-; RUN:   | FileCheck -check-prefixes=RV64,RV64F %s
+; RUN:   | FileCheck -check-prefixes=RV64F %s
 
 ; Any tests that would have identical output for some combination of the lp64*
 ; ABIs belong in calling-conv-*-common.ll. This file contains tests that will
@@ -12,17 +12,29 @@
 ; passed according to the floating point ABI.
 
 define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
-  ; RV64-LABEL: name: callee_float_in_regs
-  ; RV64: bb.1 (%ir-block.0):
-  ; RV64-NEXT:   liveins: $x10, $x11
-  ; RV64-NEXT: {{  $}}
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
-  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-  ; RV64-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[TRUNC]](s32)
-  ; RV64-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[FPTOSI]]
-  ; RV64-NEXT:   $x10 = COPY [[ADD]](s64)
-  ; RV64-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: callee_float_in_regs
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   liveins: $x10, $x11
+  ; RV64I-NEXT: {{  $}}
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+  ; RV64I-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[TRUNC]](s32)
+  ; RV64I-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV64I-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV64F-LABEL: name: callee_float_in_regs
+  ; RV64F: bb.1 (%ir-block.0):
+  ; RV64F-NEXT:   liveins: $x10, $x11
+  ; RV64F-NEXT: {{  $}}
+  ; RV64F-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64F-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64F-NEXT:   [[FMV_W_X_RV64_:%[0-9]+]]:_(s32) = G_FMV_W_X_RV64 [[COPY1]](s64)
+  ; RV64F-NEXT:   [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[FMV_W_X_RV64_]](s32)
+  ; RV64F-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[FPTOSI]]
+  ; RV64F-NEXT:   $x10 = COPY [[ADD]](s64)
+  ; RV64F-NEXT:   PseudoRET implicit $x10
   %b_fptosi = fptosi float %b to i64
   %1 = add i64 %a, %b_fptosi
   ret i64 %1
@@ -49,8 +61,8 @@ define i64 @caller_float_in_regs() nounwind {
   ; RV64F-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
   ; RV64F-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
   ; RV64F-NEXT:   $x10 = COPY [[C]](s64)
-  ; RV64F-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
-  ; RV64F-NEXT:   $x11 = COPY [[ANYEXT]](s64)
+  ; RV64F-NEXT:   [[FMV_X_ANYEXTW_RV64_:%[0-9]+]]:_(s64) = G_FMV_X_ANYEXTW_RV64 [[C1]](s32)
+  ; RV64F-NEXT:   $x11 = COPY [[FMV_X_ANYEXTW_RV64_]](s64)
   ; RV64F-NEXT:   PseudoCALL target-flags(riscv-call) @callee_float_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
   ; RV64F-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
   ; RV64F-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
@@ -61,26 +73,44 @@ define i64 @caller_float_in_regs() nounwind {
 }
 
 define float @callee_tiny_scalar_ret() nounwind {
-  ; RV64-LABEL: name: callee_tiny_scalar_ret
-  ; RV64: bb.1 (%ir-block.0):
-  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
-  ; RV64-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
-  ; RV64-NEXT:   $x10 = COPY [[ANYEXT]](s64)
-  ; RV64-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: callee_tiny_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; RV64I-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[ANYEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV64F-LABEL: name: callee_tiny_scalar_ret
+  ; RV64F: bb.1 (%ir-block.0):
+  ; RV64F-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+  ; RV64F-NEXT:   [[FMV_X_ANYEXTW_RV64_:%[0-9]+]]:_(s64) = G_FMV_X_ANYEXTW_RV64 [[C]](s32)
+  ; RV64F-NEXT:   $x10 = COPY [[FMV_X_ANYEXTW_RV64_]](s64)
+  ; RV64F-NEXT:   PseudoRET implicit $x10
   ret float 1.0
 }
 
 define i64 @caller_tiny_scalar_ret() nounwind {
-  ; RV64-LABEL: name: caller_tiny_scalar_ret
-  ; RV64: bb.1 (%ir-block.0):
-  ; RV64-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
-  ; RV64-NEXT:   PseudoCALL target-flags(riscv-call) @callee_tiny_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
-  ; RV64-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
-  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
-  ; RV64-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-  ; RV64-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s32)
-  ; RV64-NEXT:   $x10 = COPY [[SEXT]](s64)
-  ; RV64-NEXT:   PseudoRET implicit $x10
+  ; RV64I-LABEL: name: caller_tiny_scalar_ret
+  ; RV64I: bb.1 (%ir-block.0):
+  ; RV64I-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV64I-NEXT:   PseudoCALL target-flags(riscv-call) @callee_tiny_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+  ; RV64I-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV64I-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64I-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; RV64I-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s32)
+  ; RV64I-NEXT:   $x10 = COPY [[SEXT]](s64)
+  ; RV64I-NEXT:   PseudoRET implicit $x10
+  ;
+  ; RV64F-LABEL: name: caller_tiny_scalar_ret
+  ; RV64F: bb.1 (%ir-block.0):
+  ; RV64F-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
+  ; RV64F-NEXT:   PseudoCALL target-flags(riscv-call) @callee_tiny_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+  ; RV64F-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
+  ; RV64F-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+  ; RV64F-NEXT:   [[FMV_W_X_RV64_:%[0-9]+]]:_(s32) = G_FMV_W_X_RV64 [[COPY]](s64)
+  ; RV64F-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[FMV_W_X_RV64_]](s32)
+  ; RV64F-NEXT:   $x10 = COPY [[SEXT]](s64)
+  ; RV64F-NEXT:   PseudoRET implicit $x10
   %1 = call float @callee_tiny_scalar_ret()
   %2 = bitcast float %1 to i32
   %3 = sext i32 %2 to i64



More information about the llvm-commits mailing list