[llvm] [RISCV] Support inline assembly 'f' constraint for Zfinx. (PR #112986)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 18 15:21:16 PDT 2024
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/112986
>From 72cc15d3ab4e15d42e957056935f7a92a614985f Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 18 Oct 2024 14:10:58 -0700
Subject: [PATCH] [RISCV] Support inline assembly 'f' constraint for Zfinx.
This allows some inline assembly code to work with either F or Zfinx.
This appears to match gcc's behavior.
This will need to be adjusted to exclude X0 after #112563.
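
For example, here is an IR sketch mirroring the new tests (the @gf
definition is assumed here for self-containment; the test file declares
it elsewhere). Under F the 'f' operands are allocated FPR32 registers;
under Zfinx they are now placed in GPRs (GPRF32NoX0), so the same asm
string compiles with either extension:

  @gf = global float 0.0

  define float @constraint_f_float(float %a) nounwind {
    ; Under Zfinx this selects integer registers (e.g. fadd.s a0, a0, a1);
    ; under F it selects FPR32 registers instead.
    %1 = load float, ptr @gf
    %2 = tail call float asm "fadd.s $0, $1, $2", "=f,f,f"(float %a, float %1)
    ret float %2
  }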
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 48 ++++++++---
.../RISCV/inline-asm-zdinx-constraint-r.ll | 48 +++++++++++
.../RISCV/inline-asm-zfinx-constraint-r.ll | 45 ++++++++++
.../RISCV/inline-asm-zhinx-constraint-r.ll | 82 +++++++++++++++++++
4 files changed, 211 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fbd2f47d276903..3588ef46cadce1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20392,12 +20392,24 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
case 'f':
- if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16)
- return std::make_pair(0U, &RISCV::FPR16RegClass);
- if (Subtarget.hasStdExtF() && VT == MVT::f32)
- return std::make_pair(0U, &RISCV::FPR32RegClass);
- if (Subtarget.hasStdExtD() && VT == MVT::f64)
- return std::make_pair(0U, &RISCV::FPR64RegClass);
+ if (VT == MVT::f16) {
+ if (Subtarget.hasStdExtZfhmin())
+ return std::make_pair(0U, &RISCV::FPR16RegClass);
+ if (Subtarget.hasStdExtZhinxmin())
+ return std::make_pair(0U, &RISCV::GPRF16NoX0RegClass);
+ } else if (VT == MVT::f32) {
+ if (Subtarget.hasStdExtF())
+ return std::make_pair(0U, &RISCV::FPR32RegClass);
+ if (Subtarget.hasStdExtZfinx())
+ return std::make_pair(0U, &RISCV::GPRF32NoX0RegClass);
+ } else if (VT == MVT::f64) {
+ if (Subtarget.hasStdExtD())
+ return std::make_pair(0U, &RISCV::FPR64RegClass);
+ if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
+ return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
+ if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
+ return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
+ }
break;
default:
break;
@@ -20440,12 +20452,24 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
if (!VT.isVector())
return std::make_pair(0U, &RISCV::GPRCRegClass);
} else if (Constraint == "cf") {
- if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16)
- return std::make_pair(0U, &RISCV::FPR16CRegClass);
- if (Subtarget.hasStdExtF() && VT == MVT::f32)
- return std::make_pair(0U, &RISCV::FPR32CRegClass);
- if (Subtarget.hasStdExtD() && VT == MVT::f64)
- return std::make_pair(0U, &RISCV::FPR64CRegClass);
+ if (VT == MVT::f16) {
+ if (Subtarget.hasStdExtZfhmin())
+ return std::make_pair(0U, &RISCV::FPR16CRegClass);
+ if (Subtarget.hasStdExtZhinxmin())
+ return std::make_pair(0U, &RISCV::GPRF16CRegClass);
+ } else if (VT == MVT::f32) {
+ if (Subtarget.hasStdExtF())
+ return std::make_pair(0U, &RISCV::FPR32CRegClass);
+ if (Subtarget.hasStdExtZfinx())
+ return std::make_pair(0U, &RISCV::GPRF32CRegClass);
+ } else if (VT == MVT::f64) {
+ if (Subtarget.hasStdExtD())
+ return std::make_pair(0U, &RISCV::FPR64CRegClass);
+ if (Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
+ return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+ if (Subtarget.hasStdExtZdinx() && Subtarget.is64Bit())
+ return std::make_pair(0U, &RISCV::GPRCRegClass);
+ }
}
// Clang will correctly decode the usage of register name aliases into their
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
index 15729ee2bc61e9..57be0e5e4199ac 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
@@ -90,3 +90,51 @@ define double @constraint_double_abi_name(double %a) nounwind {
%2 = tail call double asm "fadd.d $0, $1, $2", "={t1},{a0},{s0}"(double %a, double %1)
ret double %2
}
+
+define double @constraint_f_double(double %a) nounwind {
+; RV32FINX-LABEL: constraint_f_double:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a2, %hi(gd)
+; RV32FINX-NEXT: lw a3, %lo(gd+4)(a2)
+; RV32FINX-NEXT: lw a2, %lo(gd)(a2)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.d a0, a0, a2
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_f_double:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gd)
+; RV64FINX-NEXT: ld a1, %lo(gd)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.d a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm "fadd.d $0, $1, $2", "=f,f,f"(double %a, double %1)
+ ret double %2
+}
+
+define double @constraint_cf_double(double %a) nounwind {
+; RV32FINX-LABEL: constraint_cf_double:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a2, %hi(gd)
+; RV32FINX-NEXT: lw a3, %lo(gd+4)(a2)
+; RV32FINX-NEXT: lw a2, %lo(gd)(a2)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.d a0, a0, a2
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_cf_double:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gd)
+; RV64FINX-NEXT: ld a1, %lo(gd)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.d a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load double, ptr @gd
+ %2 = tail call double asm "fadd.d $0, $1, $2", "=^cf,^cf,^cf"(double %a, double %1)
+ ret double %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
index a8d3515fe1890e..1c0de6c3f16121 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
@@ -87,3 +87,48 @@ define float @constraint_float_abi_name(float %a) nounwind {
ret float %2
}
+define float @constraint_f_float(float %a) nounwind {
+; RV32FINX-LABEL: constraint_f_float:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a1, %hi(gf)
+; RV32FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.s a0, a0, a1
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_f_float:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gf)
+; RV64FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.s a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm "fadd.s $0, $1, $2", "=f,f,f"(float %a, float %1)
+ ret float %2
+}
+
+define float @constraint_cf_float(float %a) nounwind {
+; RV32FINX-LABEL: constraint_cf_float:
+; RV32FINX: # %bb.0:
+; RV32FINX-NEXT: lui a1, %hi(gf)
+; RV32FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV32FINX-NEXT: #APP
+; RV32FINX-NEXT: fadd.s a0, a0, a1
+; RV32FINX-NEXT: #NO_APP
+; RV32FINX-NEXT: ret
+;
+; RV64FINX-LABEL: constraint_cf_float:
+; RV64FINX: # %bb.0:
+; RV64FINX-NEXT: lui a1, %hi(gf)
+; RV64FINX-NEXT: lw a1, %lo(gf)(a1)
+; RV64FINX-NEXT: #APP
+; RV64FINX-NEXT: fadd.s a0, a0, a1
+; RV64FINX-NEXT: #NO_APP
+; RV64FINX-NEXT: ret
+ %1 = load float, ptr @gf
+ %2 = tail call float asm "fadd.s $0, $1, $2", "=^cf,cf,cf"(float %a, float %1)
+ ret float %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll
index f9707c6c8995dc..086d2a1d6f3b2f 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll
@@ -156,3 +156,85 @@ define half @constraint_half_abi_name(half %a) nounwind {
%2 = tail call half asm "fadd.s $0, $1, $2", "={t0},{a0},{s0}"(half %a, half %1)
ret half %2
}
+
+define half @constraint_f_half(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_f_half:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: lui a1, %hi(gh)
+; RV32ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32ZHINX-NEXT: #APP
+; RV32ZHINX-NEXT: fadd.h a0, a0, a1
+; RV32ZHINX-NEXT: #NO_APP
+; RV32ZHINX-NEXT: ret
+;
+; RV64ZHINX-LABEL: constraint_f_half:
+; RV64ZHINX: # %bb.0:
+; RV64ZHINX-NEXT: lui a1, %hi(gh)
+; RV64ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64ZHINX-NEXT: #APP
+; RV64ZHINX-NEXT: fadd.h a0, a0, a1
+; RV64ZHINX-NEXT: #NO_APP
+; RV64ZHINX-NEXT: ret
+;
+; RV32DINXZHINX-LABEL: constraint_f_half:
+; RV32DINXZHINX: # %bb.0:
+; RV32DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT: #APP
+; RV32DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV32DINXZHINX-NEXT: #NO_APP
+; RV32DINXZHINX-NEXT: ret
+;
+; RV64DINXZHINX-LABEL: constraint_f_half:
+; RV64DINXZHINX: # %bb.0:
+; RV64DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT: #APP
+; RV64DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV64DINXZHINX-NEXT: #NO_APP
+; RV64DINXZHINX-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm "fadd.h $0, $1, $2", "=f,f,f"(half %a, half %1)
+ ret half %2
+}
+
+define half @constraint_cf_half(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_cf_half:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: lui a1, %hi(gh)
+; RV32ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32ZHINX-NEXT: #APP
+; RV32ZHINX-NEXT: fadd.h a0, a0, a1
+; RV32ZHINX-NEXT: #NO_APP
+; RV32ZHINX-NEXT: ret
+;
+; RV64ZHINX-LABEL: constraint_cf_half:
+; RV64ZHINX: # %bb.0:
+; RV64ZHINX-NEXT: lui a1, %hi(gh)
+; RV64ZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64ZHINX-NEXT: #APP
+; RV64ZHINX-NEXT: fadd.h a0, a0, a1
+; RV64ZHINX-NEXT: #NO_APP
+; RV64ZHINX-NEXT: ret
+;
+; RV32DINXZHINX-LABEL: constraint_cf_half:
+; RV32DINXZHINX: # %bb.0:
+; RV32DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT: #APP
+; RV32DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV32DINXZHINX-NEXT: #NO_APP
+; RV32DINXZHINX-NEXT: ret
+;
+; RV64DINXZHINX-LABEL: constraint_cf_half:
+; RV64DINXZHINX: # %bb.0:
+; RV64DINXZHINX-NEXT: lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT: lh a1, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT: #APP
+; RV64DINXZHINX-NEXT: fadd.h a0, a0, a1
+; RV64DINXZHINX-NEXT: #NO_APP
+; RV64DINXZHINX-NEXT: ret
+ %1 = load half, ptr @gh
+ %2 = tail call half asm "fadd.h $0, $1, $2", "=^cf,^cf,^cf"(half %a, half %1)
+ ret half %2
+}