[llvm] 41402c6 - [RISCV][GISel] Use CCValAssign::getCustomReg for converting f16/f32<->GPR. (#105700)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 3 22:49:05 PDT 2024
Author: Craig Topper
Date: 2024-09-03T22:49:02-07:00
New Revision: 41402c6a8aa3a4336122bdb4530fb05538efedba
URL: https://github.com/llvm/llvm-project/commit/41402c6a8aa3a4336122bdb4530fb05538efedba
DIFF: https://github.com/llvm/llvm-project/commit/41402c6a8aa3a4336122bdb4530fb05538efedba.diff
LOG: [RISCV][GISel] Use CCValAssign::getCustomReg for converting f16/f32<->GPR. (#105700)
This gives us much better control of the generated code for GISel. I've
tried to closely match the current gisel code, but it looks like we had
2 layers of G_ANYEXT in some cases before.
SelectionDAG now checks needsCustom() instead of detecting the special
cases in the Bitcast handler.
Unfortunately, IRTranslator for bitcast still generates copies between
register classes of different sizes. Because of this we can't handle
i16<->f16 bitcasts without crashing. Not sure if I should teach
RISCVInstrInfo::copyPhysReg to allow copies between FPR16 and GPR or if
I should convert the copies to instructions in GISel.
Added:
Modified:
llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index c3cb1be963cabd..6e33032384eded 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -109,15 +109,6 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
void assignValueToReg(Register ValVReg, Register PhysReg,
const CCValAssign &VA) override {
- // If we're passing a smaller fp value into a larger integer register,
- // anyextend before copying.
- if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
- ((VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::i64) &&
- VA.getValVT() == MVT::f16)) {
- LLT DstTy = LLT::scalar(VA.getLocVT().getSizeInBits());
- ValVReg = MIRBuilder.buildAnyExt(DstTy, ValVReg).getReg(0);
- }
-
Register ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildCopy(PhysReg, ExtReg);
MIB.addUse(PhysReg, RegState::Implicit);
@@ -126,16 +117,35 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
ArrayRef<CCValAssign> VAs,
std::function<void()> *Thunk) override {
+ const CCValAssign &VA = VAs[0];
+ if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
+ (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
+ Register PhysReg = VA.getLocReg();
+
+ auto assignFunc = [=]() {
+ auto Trunc = MIRBuilder.buildAnyExt(LLT(VA.getLocVT()), Arg.Regs[0]);
+ MIRBuilder.buildCopy(PhysReg, Trunc);
+ MIB.addUse(PhysReg, RegState::Implicit);
+ };
+
+ if (Thunk) {
+ *Thunk = assignFunc;
+ return 1;
+ }
+
+ assignFunc();
+ return 1;
+ }
+
assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
- const CCValAssign &VALo = VAs[0];
const CCValAssign &VAHi = VAs[1];
assert(VAHi.needsCustom() && "Value doesn't need custom handling");
- assert(VALo.getValNo() == VAHi.getValNo() &&
+ assert(VA.getValNo() == VAHi.getValNo() &&
"Values belong to different arguments");
- assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
- VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
+ assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
+ VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
"unexpected custom value");
Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
@@ -154,7 +164,7 @@ struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
}
auto assignFunc = [=]() {
- assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
+ assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
if (VAHi.isRegLoc())
assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
};
@@ -258,16 +268,29 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
ArrayRef<CCValAssign> VAs,
std::function<void()> *Thunk) override {
+ const CCValAssign &VA = VAs[0];
+ if ((VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) ||
+ (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)) {
+ Register PhysReg = VA.getLocReg();
+
+ markPhysRegUsed(PhysReg);
+
+ LLT LocTy(VA.getLocVT());
+ auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
+
+ MIRBuilder.buildTrunc(Arg.Regs[0], Copy.getReg(0));
+ return 1;
+ }
+
assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
- const CCValAssign &VALo = VAs[0];
const CCValAssign &VAHi = VAs[1];
assert(VAHi.needsCustom() && "Value doesn't need custom handling");
- assert(VALo.getValNo() == VAHi.getValNo() &&
+ assert(VA.getValNo() == VAHi.getValNo() &&
"Values belong to different arguments");
- assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
- VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
+ assert(VA.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
+ VA.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
"unexpected custom value");
Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
@@ -284,7 +307,7 @@ struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
const_cast<CCValAssign &>(VAHi));
}
- assignValueToReg(NewRegs[0], VALo.getLocReg(), VALo);
+ assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
if (VAHi.isRegLoc())
assignValueToReg(NewRegs[1], VAHi.getLocReg(), VAHi);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5089bbbe3c0d7c..d46a08a442a01d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19226,6 +19226,19 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
// similar local variables rather than directly checking against the target
// ABI.
+ ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);
+
+ if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::bf16 ||
+ (ValVT == MVT::f32 && XLen == 64))) {
+ Register Reg = State.AllocateReg(ArgGPRs);
+ if (Reg) {
+ LocVT = XLenVT;
+ State.addLoc(
+ CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
if (UseGPRForF16_F32 &&
(ValVT == MVT::f16 || ValVT == MVT::bf16 || ValVT == MVT::f32)) {
LocVT = XLenVT;
@@ -19235,8 +19248,6 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
LocInfo = CCValAssign::BCvt;
}
- ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);
-
// If this is a variadic argument, the RISC-V calling convention requires
// that it is assigned an 'even' or 'aligned' register if it has 8-byte
// alignment (RV32) or 16-byte alignment (RV64). An aligned register should
@@ -19483,6 +19494,17 @@ void RISCVTargetLowering::analyzeOutputArgs(
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
const CCValAssign &VA, const SDLoc &DL,
const RISCVSubtarget &Subtarget) {
+ if (VA.needsCustom()) {
+ if (VA.getLocVT().isInteger() &&
+ (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
+ Val = DAG.getNode(RISCVISD::FMV_H_X, DL, VA.getValVT(), Val);
+ else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
+ Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
+ else
+ llvm_unreachable("Unexpected Custom handling.");
+ return Val;
+ }
+
switch (VA.getLocInfo()) {
default:
llvm_unreachable("Unexpected CCValAssign::LocInfo");
@@ -19491,14 +19513,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
break;
case CCValAssign::BCvt:
- if (VA.getLocVT().isInteger() &&
- (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
- Val = DAG.getNode(RISCVISD::FMV_H_X, DL, VA.getValVT(), Val);
- } else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
- Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
- } else {
- Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
- }
+ Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
break;
}
return Val;
@@ -19544,6 +19559,17 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
const RISCVSubtarget &Subtarget) {
EVT LocVT = VA.getLocVT();
+ if (VA.needsCustom()) {
+ if (LocVT.isInteger() &&
+ (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
+ Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, LocVT, Val);
+ else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32)
+ Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
+ else
+ llvm_unreachable("Unexpected Custom handling.");
+ return Val;
+ }
+
switch (VA.getLocInfo()) {
default:
llvm_unreachable("Unexpected CCValAssign::LocInfo");
@@ -19552,14 +19578,7 @@ static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
break;
case CCValAssign::BCvt:
- if (LocVT.isInteger() &&
- (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
- Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, LocVT, Val);
- } else if (LocVT == MVT::i64 && VA.getValVT() == MVT::f32) {
- Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
- } else {
- Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
- }
+ Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
break;
}
return Val;
@@ -19693,8 +19712,14 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
(LocVT == MVT::f64 && Subtarget.is64Bit() &&
Subtarget.hasStdExtZdinx())) {
if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
- LocInfo = CCValAssign::BCvt;
+ if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
+ LocVT = Subtarget.getXLenVT();
+ State.addLoc(
+ CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
LocVT = Subtarget.getXLenVT();
+ LocInfo = CCValAssign::BCvt;
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
}
@@ -20337,9 +20362,8 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
Glue = RetValue2.getValue(2);
RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
RetValue2);
- }
-
- RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
+ } else
+ RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
InVals.push_back(RetValue);
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
index 04fa62b1950763..63bc43ae20e7be 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-half.ll
@@ -1018,7 +1018,6 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV64IF-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV64IF-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
; RV64IF-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s16)
- ; RV64IF-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
; RV64IF-NEXT: $f10_f = COPY [[ANYEXT]](s32)
; RV64IF-NEXT: $f11_f = COPY [[ANYEXT1]](s32)
; RV64IF-NEXT: $f12_f = COPY [[ANYEXT2]](s32)
@@ -1027,14 +1026,14 @@ define half @caller_half_return_stack2(half %x, half %y) nounwind {
; RV64IF-NEXT: $f15_f = COPY [[ANYEXT5]](s32)
; RV64IF-NEXT: $f16_f = COPY [[ANYEXT6]](s32)
; RV64IF-NEXT: $f17_f = COPY [[ANYEXT7]](s32)
- ; RV64IF-NEXT: [[ANYEXT9:%[0-9]+]]:_(s64) = G_ANYEXT [[ANYEXT8]](s32)
- ; RV64IF-NEXT: $x10 = COPY [[ANYEXT9]](s64)
+ ; RV64IF-NEXT: [[ANYEXT8:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s16)
+ ; RV64IF-NEXT: $x10 = COPY [[ANYEXT8]](s64)
; RV64IF-NEXT: PseudoCALL target-flags(riscv-call) @callee_half_return_stack2, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $f10_f
; RV64IF-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64IF-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $f10_f
; RV64IF-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; RV64IF-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
- ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT10]](s32)
+ ; RV64IF-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s16)
+ ; RV64IF-NEXT: $f10_f = COPY [[ANYEXT9]](s32)
; RV64IF-NEXT: PseudoRET implicit $f10_f
;
; RV64IZFH-LABEL: name: caller_half_return_stack2
More information about the llvm-commits
mailing list