[clang] [llvm] [RISCV][ISel] Ensure 'in X' Constraints prevent X0 (PR #112563)

Sam Elliott via cfe-commits cfe-commits at lists.llvm.org
Fri Oct 18 05:02:27 PDT 2024


https://github.com/lenary updated https://github.com/llvm/llvm-project/pull/112563

From bbf0b885dc5912d4dc29abcec5fe7cee7cfd1758 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Wed, 16 Oct 2024 05:04:45 -0700
Subject: [PATCH 1/2] [RISCV] Inline Assembly: RVC constraint and N modifier

This change implements support for the `cr` and `cf` register
constraints (which allocate an RVC GPR or an RVC FPR, respectively), and
the `N` modifier (which prints the raw encoding of a register rather
than its name).

The intention behind these additions is to make it easier to use inline
assembly when assembling raw instructions that are not supported by the
compiler, for instance when experimenting with new instructions or when
supporting proprietary extensions outside the toolchain.

These implement part of my proposal in riscv-non-isa/riscv-c-api-doc#92.

As part of the implementation, I felt there was not enough coverage of
inline assembly and the "in X" floating-point extensions, so I have
added more regression tests around these configurations.
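
For illustration, here is a minimal C-level sketch of how the new constraints
and modifier combine. This is hypothetical and not one of the test cases added
here; it reuses the `0x9422 | (rd << 7) | (rs2 << 2)` raw `c.add` encoding form
from the new `modifier_N_with_cr_reg` IR test, which relies on both operands
landing in x8-x15:

    /* Hypothetical sketch: force both operands into RVC GPRs with `cr`
       and splice their 5-bit encodings into a raw `c.add` via `N`. */
    long raw_c_add(long a, long b) {
      long ret;
      asm volatile(".insn 0x2, 0x9422 | (%N0 << 7) | (%N2 << 2)"
                   : "=cr"(ret)
                   : "0"(a), "cr"(b));
      return ret;
    }

Here `%N0` and `%N2` print the numeric encodings of the registers chosen for
operands 0 and 2, and the matching "0" constraint ties the first input to the
output so that rd == rs1, as `c.add` requires.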
---
 clang/lib/Basic/Targets/RISCV.cpp             |  10 ++
 clang/test/CodeGen/RISCV/riscv-inline-asm.c   |  40 ++++-
 llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp     |   8 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  18 ++
 llvm/lib/Target/RISCV/RISCVRegisterInfo.td    |  19 ++-
 .../RISCV/inline-asm-d-constraint-f.ll        |  33 ++++
 .../CodeGen/RISCV/inline-asm-d-modifier-N.ll  | 109 ++++++++++++
 .../RISCV/inline-asm-f-constraint-f.ll        |  28 +++-
 .../CodeGen/RISCV/inline-asm-f-modifier-N.ll  |  96 +++++++++++
 llvm/test/CodeGen/RISCV/inline-asm-invalid.ll |  20 +++
 .../RISCV/inline-asm-zdinx-constraint-r.ll    |  92 ++++++++++
 .../RISCV/inline-asm-zfh-constraint-f.ll      |  41 +++++
 .../RISCV/inline-asm-zfh-modifier-N.ll        | 157 +++++++++++++++++
 .../RISCV/inline-asm-zfinx-constraint-r.ll    |  89 ++++++++++
 .../RISCV/inline-asm-zhinx-constraint-r.ll    | 158 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/inline-asm.ll         |  66 ++++++++
 .../CodeGen/RISCV/zdinx-asm-constraint.ll     |  61 +++++++
 17 files changed, 1040 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll
 create mode 100644 llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll
 create mode 100644 llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
 create mode 100644 llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll
 create mode 100644 llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
 create mode 100644 llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll

diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 870f0f38bc3057..eaaba7642bd7b2 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -100,6 +100,14 @@ bool RISCVTargetInfo::validateAsmConstraint(
   case 'S': // A symbol or label reference with a constant offset
     Info.setAllowsRegister();
     return true;
+  case 'c':
+    // A RVC register - GPR or FPR
+    if (Name[1] == 'r' || Name[1] == 'f') {
+      Info.setAllowsRegister();
+      Name += 1;
+      return true;
+    }
+    return false;
   case 'v':
     // A vector register.
     if (Name[1] == 'r' || Name[1] == 'd' || Name[1] == 'm') {
@@ -114,6 +122,8 @@ bool RISCVTargetInfo::validateAsmConstraint(
 std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
   std::string R;
   switch (*Constraint) {
+  // c* and v* are two-letter constraints on RISC-V.
+  case 'c':
   case 'v':
     R = std::string("^") + std::string(Constraint, 2);
     Constraint += 1;
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm.c b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
index fa0bf6aa6aa471..75b91d3c497c50 100644
--- a/clang/test/CodeGen/RISCV/riscv-inline-asm.c
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm.c
@@ -3,7 +3,35 @@
 // RUN: %clang_cc1 -triple riscv64 -O2 -emit-llvm %s -o - \
 // RUN:     | FileCheck %s
 
-// Test RISC-V specific inline assembly constraints.
+// Test RISC-V specific inline assembly constraints and modifiers.
+
+long test_r(long x) {
+// CHECK-LABEL: define{{.*}} {{i64|i32}} @test_r(
+// CHECK: call {{i64|i32}} asm sideeffect "", "=r,r"({{i64|i32}} %{{.*}})
+  long ret;
+  asm volatile ("" : "=r"(ret) : "r"(x));
+// CHECK: call {{i64|i32}} asm sideeffect "", "=r,r"({{i64|i32}} %{{.*}})
+  asm volatile ("" : "=r"(ret) : "r"(x));
+  return ret;
+}
+
+long test_cr(long x) {
+// CHECK-LABEL: define{{.*}} {{i64|i32}} @test_cr(
+// CHECK: call {{i64|i32}} asm sideeffect "", "=^cr,^cr"({{i64|i32}} %{{.*}})
+  long ret;
+  asm volatile ("" : "=cr"(ret) : "cr"(x));
+  return ret;
+}
+
+float cf;
+double cd;
+void test_cf(float f, double d) {
+// CHECK-LABEL: define{{.*}} void @test_cf(
+// CHECK: call float asm sideeffect "", "=^cf,^cf"(float %{{.*}})
+  asm volatile("" : "=cf"(cf) : "cf"(f));
+// CHECK: call double asm sideeffect "", "=^cf,^cf"(double %{{.*}})
+  asm volatile("" : "=cf"(cd) : "cf"(d));
+}
 
 void test_I(void) {
 // CHECK-LABEL: define{{.*}} void @test_I()
@@ -58,3 +86,13 @@ void test_s(void) {
 
   asm("// %0 %1 %2" :: "S"(&var), "S"(&arr[1][1]), "S"(test_s));
 }
+
+// CHECK-LABEL: test_modifiers(
+// CHECK: call void asm sideeffect "// ${0:i} ${1:i}", "r,r"({{i32|i64}} %val, i32 37)
+// CHECK: call void asm sideeffect "// ${0:z} ${1:z}", "i,i"(i32 0, i32 1)
+// CHECK: call void asm sideeffect "// ${0:N}", "r"({{i32|i64}} %val)
+void test_modifiers(long val) {
+  asm volatile("// %i0 %i1" :: "r"(val), "r"(37));
+  asm volatile("// %z0 %z1" :: "i"(0), "i"(1));
+  asm volatile("// %N0" :: "r"(val));
+}
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index 5ad09ae7290fc5..5eba36a0bb7d69 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -19,6 +19,7 @@
 #include "RISCV.h"
 #include "RISCVConstantPoolValue.h"
 #include "RISCVMachineFunctionInfo.h"
+#include "RISCVRegisterInfo.h"
 #include "RISCVTargetMachine.h"
 #include "TargetInfo/RISCVTargetInfo.h"
 #include "llvm/ADT/APInt.h"
@@ -348,6 +349,13 @@ bool RISCVAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
       if (!MO.isReg())
         OS << 'i';
       return false;
+    case 'N': // Print the register encoding as an integer (0-31)
+      if (!MO.isReg())
+        return true;
+
+      const RISCVRegisterInfo *TRI = STI->getRegisterInfo();
+      OS << TRI->getEncodingValue(MO.getReg());
+      return false;
     }
   }
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cde690793f0702..aa34676d2b2a61 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20229,6 +20229,8 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
   } else {
     if (Constraint == "vr" || Constraint == "vd" || Constraint == "vm")
       return C_RegisterClass;
+    if (Constraint == "cr" || Constraint == "cf")
+      return C_RegisterClass;
   }
   return TargetLowering::getConstraintType(Constraint);
 }
@@ -20291,6 +20293,22 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
   } else if (Constraint == "vm") {
     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
       return std::make_pair(0U, &RISCV::VMV0RegClass);
+  } else if (Constraint == "cr") {
+    if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
+      return std::make_pair(0U, &RISCV::GPRF16CRegClass);
+    if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
+      return std::make_pair(0U, &RISCV::GPRF32CRegClass);
+    if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
+      return std::make_pair(0U, &RISCV::GPRPairCRegClass);
+    if (!VT.isVector())
+      return std::make_pair(0U, &RISCV::GPRCRegClass);
+  } else if (Constraint == "cf") {
+    if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16)
+      return std::make_pair(0U, &RISCV::FPR16CRegClass);
+    if (Subtarget.hasStdExtF() && VT == MVT::f32)
+      return std::make_pair(0U, &RISCV::FPR32CRegClass);
+    if (Subtarget.hasStdExtD() && VT == MVT::f64)
+      return std::make_pair(0U, &RISCV::FPR64CRegClass);
   }
 
   // Clang will correctly decode the usage of register name aliases into their
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 33363aa8b71830..250f3c10f309bf 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -338,6 +338,11 @@ def FPR16 : RISCVRegisterClass<[f16, bf16], 16, (add
     (sequence "F%u_H", 18, 27)  // fs2-fs11
 )>;
 
+def FPR16C : RISCVRegisterClass<[f16, bf16], 16, (add
+    (sequence "F%u_H", 15, 10),
+    (sequence "F%u_H", 8, 9)
+)>;
+
 def FPR32 : RISCVRegisterClass<[f32], 32, (add
     (sequence "F%u_F", 15, 10),
     (sequence "F%u_F", 0, 7),
@@ -667,6 +672,10 @@ def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
                                                  (sequence "X%u_W", 8, 9))>;
 def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
 
+def XLenPairRI : RegInfoByHwMode<
+      [RV32,                RV64],
+      [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>;
+
 // Dummy zero register for use in the register pair containing X0 (as X1 is
 // not read to or written when the X0 register pair is used).
 def DUMMY_REG_PAIR_WITH_X0 : RISCVReg<0, "0">;
@@ -698,9 +707,8 @@ let RegAltNameIndices = [ABIRegAltName] in {
   }
 }
 
-let RegInfos = RegInfoByHwMode<[RV32, RV64],
-                               [RegInfo<64, 64, 32>, RegInfo<128, 128, 64>]>,
-    DecoderMethod = "DecodeGPRPairRegisterClass" in
+let RegInfos = XLenPairRI,
+    DecoderMethod = "DecodeGPRPairRegisterClass" in {
 def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
     X10_X11, X12_X13, X14_X15, X16_X17,
     X6_X7,
@@ -710,6 +718,11 @@ def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
     X0_Pair, X2_X3, X4_X5
 )>;
 
+def GPRPairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
+  X10_X11, X12_X13, X14_X15, X8_X9
+)>;
+} // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
+
 // The register class is added for inline assembly for vector mask types.
 def VM : VReg<VMaskVTs, (add VR), 1>;
 
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
index c480ba800c6904..08e91736582012 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll
@@ -39,6 +39,39 @@ define double @constraint_f_double(double %a) nounwind {
   ret double %2
 }
 
+define double @constraint_cf_double(double %a) nounwind {
+; RV32F-LABEL: constraint_cf_double:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    addi sp, sp, -16
+; RV32F-NEXT:    sw a0, 8(sp)
+; RV32F-NEXT:    sw a1, 12(sp)
+; RV32F-NEXT:    fld fa5, 8(sp)
+; RV32F-NEXT:    lui a0, %hi(gd)
+; RV32F-NEXT:    fld fa4, %lo(gd)(a0)
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    fadd.d fa5, fa5, fa4
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fsd fa5, 8(sp)
+; RV32F-NEXT:    lw a0, 8(sp)
+; RV32F-NEXT:    lw a1, 12(sp)
+; RV32F-NEXT:    addi sp, sp, 16
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: constraint_cf_double:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gd)
+; RV64F-NEXT:    fld fa5, %lo(gd)(a1)
+; RV64F-NEXT:    fmv.d.x fa4, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    fadd.d fa5, fa4, fa5
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.d a0, fa5
+; RV64F-NEXT:    ret
+  %1 = load double, ptr @gd
+  %2 = tail call double asm "fadd.d $0, $1, $2", "=^cf,^cf,^cf"(double %a, double %1)
+  ret double %2
+}
+
 define double @constraint_f_double_abi_name(double %a) nounwind {
 ; RV32F-LABEL: constraint_f_double_abi_name:
 ; RV32F:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll
new file mode 100644
index 00000000000000..581cf8e3bf3c9e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   | FileCheck -check-prefix=RV64F %s
+
+;; `.insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is
+;; the raw encoding for `fadd.d`
+
+ at gd = external global double
+
+define double @constraint_f_double(double %a) nounwind {
+; RV32F-LABEL: constraint_f_double:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    addi sp, sp, -16
+; RV32F-NEXT:    sw a0, 8(sp)
+; RV32F-NEXT:    sw a1, 12(sp)
+; RV32F-NEXT:    fld fa5, 8(sp)
+; RV32F-NEXT:    lui a0, %hi(gd)
+; RV32F-NEXT:    fld fa4, %lo(gd)(a0)
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    .insn 0x4, 0x02000053 | (15 << 7) | (15 << 15) | (14 << 20)
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fsd fa5, 8(sp)
+; RV32F-NEXT:    lw a0, 8(sp)
+; RV32F-NEXT:    lw a1, 12(sp)
+; RV32F-NEXT:    addi sp, sp, 16
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: constraint_f_double:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gd)
+; RV64F-NEXT:    fld fa5, %lo(gd)(a1)
+; RV64F-NEXT:    fmv.d.x fa4, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    .insn 0x4, 0x02000053 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.d a0, fa5
+; RV64F-NEXT:    ret
+  %1 = load double, ptr @gd
+  %2 = tail call double asm ".insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=f,f,f"(double %a, double %1)
+  ret double %2
+}
+
+define double @constraint_cf_double(double %a) nounwind {
+; RV32F-LABEL: constraint_cf_double:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    addi sp, sp, -16
+; RV32F-NEXT:    sw a0, 8(sp)
+; RV32F-NEXT:    sw a1, 12(sp)
+; RV32F-NEXT:    fld fa5, 8(sp)
+; RV32F-NEXT:    lui a0, %hi(gd)
+; RV32F-NEXT:    fld fa4, %lo(gd)(a0)
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    .insn 0x4, 0x02000053 | (15 << 7) | (15 << 15) | (14 << 20)
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fsd fa5, 8(sp)
+; RV32F-NEXT:    lw a0, 8(sp)
+; RV32F-NEXT:    lw a1, 12(sp)
+; RV32F-NEXT:    addi sp, sp, 16
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: constraint_cf_double:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gd)
+; RV64F-NEXT:    fld fa5, %lo(gd)(a1)
+; RV64F-NEXT:    fmv.d.x fa4, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    .insn 0x4, 0x02000053 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.d a0, fa5
+; RV64F-NEXT:    ret
+  %1 = load double, ptr @gd
+  %2 = tail call double asm ".insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=^cf,^cf,^cf"(double %a, double %1)
+  ret double %2
+}
+
+define double @constraint_f_double_abi_name(double %a) nounwind {
+; RV32F-LABEL: constraint_f_double_abi_name:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    addi sp, sp, -16
+; RV32F-NEXT:    sw a0, 8(sp)
+; RV32F-NEXT:    sw a1, 12(sp)
+; RV32F-NEXT:    fld fa1, 8(sp)
+; RV32F-NEXT:    lui a0, %hi(gd)
+; RV32F-NEXT:    fld fs0, %lo(gd)(a0)
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    .insn 0x4, 0x02000053 | (0 << 7) | (11 << 15) | (8 << 20)
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fsd ft0, 8(sp)
+; RV32F-NEXT:    lw a0, 8(sp)
+; RV32F-NEXT:    lw a1, 12(sp)
+; RV32F-NEXT:    addi sp, sp, 16
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: constraint_f_double_abi_name:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gd)
+; RV64F-NEXT:    fld fs0, %lo(gd)(a1)
+; RV64F-NEXT:    fmv.d.x fa1, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    .insn 0x4, 0x02000053 | (0 << 7) | (11 << 15) | (8 << 20)
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.d a0, ft0
+; RV64F-NEXT:    ret
+  %1 = load double, ptr @gd
+  %2 = tail call double asm ".insn 0x4, 0x02000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "={ft0},{fa1},{fs0}"(double %a, double %1)
+  ret double %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
index 91922cd236dfff..a91c6544f9e29c 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll
@@ -1,5 +1,4 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; NOTE: Assertions gave been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32F %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 -verify-machineinstrs < %s \
@@ -38,6 +37,33 @@ define float @constraint_f_float(float %a) nounwind {
   ret float %2
 }
 
+define float @constraint_cf_float(float %a) nounwind {
+; RV32F-LABEL: constraint_cf_float:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    lui a1, %hi(gf)
+; RV32F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV32F-NEXT:    fmv.w.x fa4, a0
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    fadd.s fa5, fa4, fa5
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fmv.x.w a0, fa5
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: constraint_cf_float:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gf)
+; RV64F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV64F-NEXT:    fmv.w.x fa4, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    fadd.s fa5, fa4, fa5
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.w a0, fa5
+; RV64F-NEXT:    ret
+  %1 = load float, ptr @gf
+  %2 = tail call float asm "fadd.s $0, $1, $2", "=^cf,cf,cf"(float %a, float %1)
+  ret float %2
+}
+
 define float @constraint_f_float_abi_name(float %a) nounwind {
 ; RV32F-LABEL: constraint_f_float_abi_name:
 ; RV32F:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll b/llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll
new file mode 100644
index 00000000000000..a0de5c71a7df6a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   | FileCheck -check-prefix=RV64F %s
+; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   | FileCheck -check-prefix=RV64F %s
+
+;; `.insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is
+;; the raw encoding for `fadd.s`
+
+ at gf = external global float
+
+define float @constraint_f_modifier_N_float(float %a) nounwind {
+; RV32F-LABEL: constraint_f_modifier_N_float:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    lui a1, %hi(gf)
+; RV32F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV32F-NEXT:    fmv.w.x fa4, a0
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fmv.x.w a0, fa5
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: constraint_f_modifier_N_float:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gf)
+; RV64F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV64F-NEXT:    fmv.w.x fa4, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.w a0, fa5
+; RV64F-NEXT:    ret
+  %1 = load float, ptr @gf
+  %2 = tail call float asm ".insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=f,f,f"(float %a, float %1)
+  ret float %2
+}
+
+
+define float @constraint_cf_modifier_N_float(float %a) nounwind {
+; RV32F-LABEL: constraint_cf_modifier_N_float:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    lui a1, %hi(gf)
+; RV32F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV32F-NEXT:    fmv.w.x fa4, a0
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fmv.x.w a0, fa5
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: constraint_cf_modifier_N_float:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gf)
+; RV64F-NEXT:    flw fa5, %lo(gf)(a1)
+; RV64F-NEXT:    fmv.w.x fa4, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    .insn 0x4, 0x53 | (15 << 7) | (14 << 15) | (15 << 20)
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.w a0, fa5
+; RV64F-NEXT:    ret
+  %1 = load float, ptr @gf
+  %2 = tail call float asm ".insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=^cf,^cf,^cf"(float %a, float %1)
+  ret float %2
+}
+
+define float @modifier_N_float_abi_name(float %a) nounwind {
+; RV32F-LABEL: modifier_N_float_abi_name:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    lui a1, %hi(gf)
+; RV32F-NEXT:    flw fs0, %lo(gf)(a1)
+; RV32F-NEXT:    fmv.w.x fa0, a0
+; RV32F-NEXT:    #APP
+; RV32F-NEXT:    .insn 0x4, 0x53 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV32F-NEXT:    #NO_APP
+; RV32F-NEXT:    fmv.x.w a0, ft0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: modifier_N_float_abi_name:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    lui a1, %hi(gf)
+; RV64F-NEXT:    flw fs0, %lo(gf)(a1)
+; RV64F-NEXT:    fmv.w.x fa0, a0
+; RV64F-NEXT:    #APP
+; RV64F-NEXT:    .insn 0x4, 0x53 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV64F-NEXT:    #NO_APP
+; RV64F-NEXT:    fmv.x.w a0, ft0
+; RV64F-NEXT:    ret
+  %1 = load float, ptr @gf
+  %2 = tail call float asm ".insn 0x4, 0x53 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "={ft0},{fa0},{fs0}"(float %a, float %1)
+  ret float %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll b/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll
index 14b7cb89667491..deffa177c5e6b3 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-invalid.ll
@@ -31,6 +31,14 @@ define void @constraint_f() nounwind {
   ret void
 }
 
+define void @constraint_cf() nounwind {
+; CHECK: error: couldn't allocate input reg for constraint 'cf'
+  tail call void asm "fadd.s fa0, fa0, $0", "^cf"(float 0.0)
+; CHECK: error: couldn't allocate input reg for constraint 'cf'
+  tail call void asm "fadd.d fa0, fa0, $0", "^cf"(double 0.0)
+  ret void
+}
+
 define void @constraint_r_fixed_vec() nounwind {
 ; CHECK: error: couldn't allocate input reg for constraint 'r'
   tail call void asm "add a0, a0, $0", "r"(<4 x i32> zeroinitializer)
@@ -42,3 +50,15 @@ define void @constraint_r_scalable_vec() nounwind {
   tail call void asm "add a0, a0, $0", "r"(<vscale x 4 x i32> zeroinitializer)
   ret void
 }
+
+define void @constraint_cr_fixed_vec() nounwind {
+; CHECK: error: couldn't allocate input reg for constraint 'cr'
+  tail call void asm "add a0, a0, $0", "^cr"(<4 x i32> zeroinitializer)
+  ret void
+}
+
+define void @constraint_cr_scalable_vec() nounwind {
+; CHECK: error: couldn't allocate input reg for constraint 'cr'
+  tail call void asm "add a0, a0, $0", "^cr"(<vscale x 4 x i32> zeroinitializer)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
new file mode 100644
index 00000000000000..15729ee2bc61e9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zdinx-constraint-r.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+zdinx -target-abi=ilp32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32FINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64FINX %s
+
+ at gd = external global double
+
+define double @constraint_r_double(double %a) nounwind {
+; RV32FINX-LABEL: constraint_r_double:
+; RV32FINX:       # %bb.0:
+; RV32FINX-NEXT:    lui a2, %hi(gd)
+; RV32FINX-NEXT:    lw a3, %lo(gd+4)(a2)
+; RV32FINX-NEXT:    lw a2, %lo(gd)(a2)
+; RV32FINX-NEXT:    #APP
+; RV32FINX-NEXT:    fadd.d a0, a0, a2
+; RV32FINX-NEXT:    #NO_APP
+; RV32FINX-NEXT:    ret
+;
+; RV64FINX-LABEL: constraint_r_double:
+; RV64FINX:       # %bb.0:
+; RV64FINX-NEXT:    lui a1, %hi(gd)
+; RV64FINX-NEXT:    ld a1, %lo(gd)(a1)
+; RV64FINX-NEXT:    #APP
+; RV64FINX-NEXT:    fadd.d a0, a0, a1
+; RV64FINX-NEXT:    #NO_APP
+; RV64FINX-NEXT:    ret
+  %1 = load double, ptr @gd
+  %2 = tail call double asm "fadd.d $0, $1, $2", "=r,r,r"(double %a, double %1)
+  ret double %2
+}
+
+define double @constraint_cr_double(double %a) nounwind {
+; RV32FINX-LABEL: constraint_cr_double:
+; RV32FINX:       # %bb.0:
+; RV32FINX-NEXT:    lui a2, %hi(gd)
+; RV32FINX-NEXT:    lw a3, %lo(gd+4)(a2)
+; RV32FINX-NEXT:    lw a2, %lo(gd)(a2)
+; RV32FINX-NEXT:    #APP
+; RV32FINX-NEXT:    fadd.d a0, a0, a2
+; RV32FINX-NEXT:    #NO_APP
+; RV32FINX-NEXT:    ret
+;
+; RV64FINX-LABEL: constraint_cr_double:
+; RV64FINX:       # %bb.0:
+; RV64FINX-NEXT:    lui a1, %hi(gd)
+; RV64FINX-NEXT:    ld a1, %lo(gd)(a1)
+; RV64FINX-NEXT:    #APP
+; RV64FINX-NEXT:    fadd.d a0, a0, a1
+; RV64FINX-NEXT:    #NO_APP
+; RV64FINX-NEXT:    ret
+  %1 = load double, ptr @gd
+  %2 = tail call double asm "fadd.d $0, $1, $2", "=^cr,^cr,^cr"(double %a, double %1)
+  ret double %2
+}
+
+define double @constraint_double_abi_name(double %a) nounwind {
+; RV32FINX-LABEL: constraint_double_abi_name:
+; RV32FINX:       # %bb.0:
+; RV32FINX-NEXT:    addi sp, sp, -16
+; RV32FINX-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32FINX-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; RV32FINX-NEXT:    lui a2, %hi(gd)
+; RV32FINX-NEXT:    lw s0, %lo(gd)(a2)
+; RV32FINX-NEXT:    lw s1, %lo(gd+4)(a2)
+; RV32FINX-NEXT:    #APP
+; RV32FINX-NEXT:    fadd.d t1, a0, s0
+; RV32FINX-NEXT:    #NO_APP
+; RV32FINX-NEXT:    mv a0, t1
+; RV32FINX-NEXT:    mv a1, t2
+; RV32FINX-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32FINX-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; RV32FINX-NEXT:    addi sp, sp, 16
+; RV32FINX-NEXT:    ret
+;
+; RV64FINX-LABEL: constraint_double_abi_name:
+; RV64FINX:       # %bb.0:
+; RV64FINX-NEXT:    addi sp, sp, -16
+; RV64FINX-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
+; RV64FINX-NEXT:    lui a1, %hi(gd)
+; RV64FINX-NEXT:    ld s0, %lo(gd)(a1)
+; RV64FINX-NEXT:    #APP
+; RV64FINX-NEXT:    fadd.d t1, a0, s0
+; RV64FINX-NEXT:    #NO_APP
+; RV64FINX-NEXT:    mv a0, t1
+; RV64FINX-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64FINX-NEXT:    addi sp, sp, 16
+; RV64FINX-NEXT:    ret
+  %1 = load double, ptr @gd
+  %2 = tail call double asm "fadd.d $0, $1, $2", "={t1},{a0},{s0}"(double %a, double %1)
+  ret double %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
index 8caf5956e7a7a7..83145ba69673d5 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfh-constraint-f.ll
@@ -51,6 +51,47 @@ define half @constraint_f_half(half %a) nounwind {
   ret half %2
 }
 
+define half @constraint_cf_half(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_cf_half:
+; RV32ZFH:       # %bb.0:
+; RV32ZFH-NEXT:    lui a0, %hi(gh)
+; RV32ZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV32ZFH-NEXT:    #APP
+; RV32ZFH-NEXT:    fadd.h fa0, fa0, fa5
+; RV32ZFH-NEXT:    #NO_APP
+; RV32ZFH-NEXT:    ret
+;
+; RV64ZFH-LABEL: constraint_cf_half:
+; RV64ZFH:       # %bb.0:
+; RV64ZFH-NEXT:    lui a0, %hi(gh)
+; RV64ZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV64ZFH-NEXT:    #APP
+; RV64ZFH-NEXT:    fadd.h fa0, fa0, fa5
+; RV64ZFH-NEXT:    #NO_APP
+; RV64ZFH-NEXT:    ret
+;
+; RV32DZFH-LABEL: constraint_cf_half:
+; RV32DZFH:       # %bb.0:
+; RV32DZFH-NEXT:    lui a0, %hi(gh)
+; RV32DZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV32DZFH-NEXT:    #APP
+; RV32DZFH-NEXT:    fadd.h fa0, fa0, fa5
+; RV32DZFH-NEXT:    #NO_APP
+; RV32DZFH-NEXT:    ret
+;
+; RV64DZFH-LABEL: constraint_cf_half:
+; RV64DZFH:       # %bb.0:
+; RV64DZFH-NEXT:    lui a0, %hi(gh)
+; RV64DZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV64DZFH-NEXT:    #APP
+; RV64DZFH-NEXT:    fadd.h fa0, fa0, fa5
+; RV64DZFH-NEXT:    #NO_APP
+; RV64DZFH-NEXT:    ret
+  %1 = load half, ptr @gh
+  %2 = tail call half asm "fadd.h $0, $1, $2", "=^cf,^cf,^cf"(half %a, half %1)
+  ret half %2
+}
+
 define half @constraint_f_half_abi_name(half %a) nounwind {
 ; RV32ZFH-LABEL: constraint_f_half_abi_name:
 ; RV32ZFH:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll
new file mode 100644
index 00000000000000..d1eb2a2d8b102a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfh-modifier-N.ll
@@ -0,0 +1,157 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=zfh -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32ZFH %s
+; RUN: llc -mtriple=riscv64 -mattr=zfh -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64ZFH %s
+; RUN: llc -mtriple=riscv32 -mattr=zfh,+d -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32DZFH %s
+; RUN: llc -mtriple=riscv64 -mattr=zfh,+d -verify-machineinstrs -no-integrated-as < %s \
+; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64DZFH %s
+
+;; `.insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is
+;; the raw encoding for `fadd.h`
+
+ at gh = external global half
+
+define half @constraint_f_half(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_f_half:
+; RV32ZFH:       # %bb.0:
+; RV32ZFH-NEXT:    lui a0, %hi(gh)
+; RV32ZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV32ZFH-NEXT:    #APP
+; RV32ZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32ZFH-NEXT:    #NO_APP
+; RV32ZFH-NEXT:    ret
+;
+; RV64ZFH-LABEL: constraint_f_half:
+; RV64ZFH:       # %bb.0:
+; RV64ZFH-NEXT:    lui a0, %hi(gh)
+; RV64ZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV64ZFH-NEXT:    #APP
+; RV64ZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64ZFH-NEXT:    #NO_APP
+; RV64ZFH-NEXT:    ret
+;
+; RV32DZFH-LABEL: constraint_f_half:
+; RV32DZFH:       # %bb.0:
+; RV32DZFH-NEXT:    lui a0, %hi(gh)
+; RV32DZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV32DZFH-NEXT:    #APP
+; RV32DZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32DZFH-NEXT:    #NO_APP
+; RV32DZFH-NEXT:    ret
+;
+; RV64DZFH-LABEL: constraint_f_half:
+; RV64DZFH:       # %bb.0:
+; RV64DZFH-NEXT:    lui a0, %hi(gh)
+; RV64DZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV64DZFH-NEXT:    #APP
+; RV64DZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64DZFH-NEXT:    #NO_APP
+; RV64DZFH-NEXT:    ret
+  %1 = load half, ptr @gh
+  %2 = tail call half asm ".insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=f,f,f"(half %a, half %1)
+  ret half %2
+}
+
+define half @constraint_cf_half(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_cf_half:
+; RV32ZFH:       # %bb.0:
+; RV32ZFH-NEXT:    lui a0, %hi(gh)
+; RV32ZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV32ZFH-NEXT:    #APP
+; RV32ZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32ZFH-NEXT:    #NO_APP
+; RV32ZFH-NEXT:    ret
+;
+; RV64ZFH-LABEL: constraint_cf_half:
+; RV64ZFH:       # %bb.0:
+; RV64ZFH-NEXT:    lui a0, %hi(gh)
+; RV64ZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV64ZFH-NEXT:    #APP
+; RV64ZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64ZFH-NEXT:    #NO_APP
+; RV64ZFH-NEXT:    ret
+;
+; RV32DZFH-LABEL: constraint_cf_half:
+; RV32DZFH:       # %bb.0:
+; RV32DZFH-NEXT:    lui a0, %hi(gh)
+; RV32DZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV32DZFH-NEXT:    #APP
+; RV32DZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV32DZFH-NEXT:    #NO_APP
+; RV32DZFH-NEXT:    ret
+;
+; RV64DZFH-LABEL: constraint_cf_half:
+; RV64DZFH:       # %bb.0:
+; RV64DZFH-NEXT:    lui a0, %hi(gh)
+; RV64DZFH-NEXT:    flh fa5, %lo(gh)(a0)
+; RV64DZFH-NEXT:    #APP
+; RV64DZFH-NEXT:    .insn 0x4, 0x04000053 | (10 << 7) | (10 << 15) | (15 << 20)
+; RV64DZFH-NEXT:    #NO_APP
+; RV64DZFH-NEXT:    ret
+  %1 = load half, ptr @gh
+  %2 = tail call half asm ".insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=^cf,^cf,^cf"(half %a, half %1)
+  ret half %2
+}
+
+define half @constraint_f_half_abi_name(half %a) nounwind {
+; RV32ZFH-LABEL: constraint_f_half_abi_name:
+; RV32ZFH:       # %bb.0:
+; RV32ZFH-NEXT:    addi sp, sp, -16
+; RV32ZFH-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV32ZFH-NEXT:    lui a0, %hi(gh)
+; RV32ZFH-NEXT:    flh fs0, %lo(gh)(a0)
+; RV32ZFH-NEXT:    #APP
+; RV32ZFH-NEXT:    .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV32ZFH-NEXT:    #NO_APP
+; RV32ZFH-NEXT:    fmv.h fa0, ft0
+; RV32ZFH-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
+; RV32ZFH-NEXT:    addi sp, sp, 16
+; RV32ZFH-NEXT:    ret
+;
+; RV64ZFH-LABEL: constraint_f_half_abi_name:
+; RV64ZFH:       # %bb.0:
+; RV64ZFH-NEXT:    addi sp, sp, -16
+; RV64ZFH-NEXT:    fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV64ZFH-NEXT:    lui a0, %hi(gh)
+; RV64ZFH-NEXT:    flh fs0, %lo(gh)(a0)
+; RV64ZFH-NEXT:    #APP
+; RV64ZFH-NEXT:    .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV64ZFH-NEXT:    #NO_APP
+; RV64ZFH-NEXT:    fmv.h fa0, ft0
+; RV64ZFH-NEXT:    flw fs0, 12(sp) # 4-byte Folded Reload
+; RV64ZFH-NEXT:    addi sp, sp, 16
+; RV64ZFH-NEXT:    ret
+;
+; RV32DZFH-LABEL: constraint_f_half_abi_name:
+; RV32DZFH:       # %bb.0:
+; RV32DZFH-NEXT:    addi sp, sp, -16
+; RV32DZFH-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
+; RV32DZFH-NEXT:    lui a0, %hi(gh)
+; RV32DZFH-NEXT:    flh fs0, %lo(gh)(a0)
+; RV32DZFH-NEXT:    #APP
+; RV32DZFH-NEXT:    .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV32DZFH-NEXT:    #NO_APP
+; RV32DZFH-NEXT:    fmv.h fa0, ft0
+; RV32DZFH-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
+; RV32DZFH-NEXT:    addi sp, sp, 16
+; RV32DZFH-NEXT:    ret
+;
+; RV64DZFH-LABEL: constraint_f_half_abi_name:
+; RV64DZFH:       # %bb.0:
+; RV64DZFH-NEXT:    addi sp, sp, -16
+; RV64DZFH-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
+; RV64DZFH-NEXT:    lui a0, %hi(gh)
+; RV64DZFH-NEXT:    flh fs0, %lo(gh)(a0)
+; RV64DZFH-NEXT:    #APP
+; RV64DZFH-NEXT:    .insn 0x4, 0x04000053 | (0 << 7) | (10 << 15) | (8 << 20)
+; RV64DZFH-NEXT:    #NO_APP
+; RV64DZFH-NEXT:    fmv.h fa0, ft0
+; RV64DZFH-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
+; RV64DZFH-NEXT:    addi sp, sp, 16
+; RV64DZFH-NEXT:    ret
+  %1 = load half, ptr @gh
+  %2 = tail call half asm ".insn 0x4, 0x04000053 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "={ft0},{fa0},{fs0}"(half %a, half %1)
+  ret half %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
new file mode 100644
index 00000000000000..a8d3515fe1890e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -target-abi=ilp32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32FINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64FINX %s
+
+ at gf = external global float
+
+define float @constraint_r_float(float %a) nounwind {
+; RV32FINX-LABEL: constraint_r_float:
+; RV32FINX:       # %bb.0:
+; RV32FINX-NEXT:    lui a1, %hi(gf)
+; RV32FINX-NEXT:    lw a1, %lo(gf)(a1)
+; RV32FINX-NEXT:    #APP
+; RV32FINX-NEXT:    fadd.s a0, a0, a1
+; RV32FINX-NEXT:    #NO_APP
+; RV32FINX-NEXT:    ret
+;
+; RV64FINX-LABEL: constraint_r_float:
+; RV64FINX:       # %bb.0:
+; RV64FINX-NEXT:    lui a1, %hi(gf)
+; RV64FINX-NEXT:    lw a1, %lo(gf)(a1)
+; RV64FINX-NEXT:    #APP
+; RV64FINX-NEXT:    fadd.s a0, a0, a1
+; RV64FINX-NEXT:    #NO_APP
+; RV64FINX-NEXT:    ret
+  %1 = load float, ptr @gf
+  %2 = tail call float asm "fadd.s $0, $1, $2", "=r,r,r"(float %a, float %1)
+  ret float %2
+}
+
+define float @constraint_cr_float(float %a) nounwind {
+; RV32FINX-LABEL: constraint_cr_float:
+; RV32FINX:       # %bb.0:
+; RV32FINX-NEXT:    lui a1, %hi(gf)
+; RV32FINX-NEXT:    lw a1, %lo(gf)(a1)
+; RV32FINX-NEXT:    #APP
+; RV32FINX-NEXT:    fadd.s a0, a0, a1
+; RV32FINX-NEXT:    #NO_APP
+; RV32FINX-NEXT:    ret
+;
+; RV64FINX-LABEL: constraint_cr_float:
+; RV64FINX:       # %bb.0:
+; RV64FINX-NEXT:    lui a1, %hi(gf)
+; RV64FINX-NEXT:    lw a1, %lo(gf)(a1)
+; RV64FINX-NEXT:    #APP
+; RV64FINX-NEXT:    fadd.s a0, a0, a1
+; RV64FINX-NEXT:    #NO_APP
+; RV64FINX-NEXT:    ret
+  %1 = load float, ptr @gf
+  %2 = tail call float asm "fadd.s $0, $1, $2", "=^cr,cr,cr"(float %a, float %1)
+  ret float %2
+}
+
+define float @constraint_float_abi_name(float %a) nounwind {
+; RV32FINX-LABEL: constraint_float_abi_name:
+; RV32FINX:       # %bb.0:
+; RV32FINX-NEXT:    addi sp, sp, -16
+; RV32FINX-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32FINX-NEXT:    lui a1, %hi(gf)
+; RV32FINX-NEXT:    lw s0, %lo(gf)(a1)
+; RV32FINX-NEXT:    # kill: def $x10_w killed $x10_w def $x10
+; RV32FINX-NEXT:    #APP
+; RV32FINX-NEXT:    fadd.s t0, a0, s0
+; RV32FINX-NEXT:    #NO_APP
+; RV32FINX-NEXT:    mv a0, t0
+; RV32FINX-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32FINX-NEXT:    addi sp, sp, 16
+; RV32FINX-NEXT:    ret
+;
+; RV64FINX-LABEL: constraint_float_abi_name:
+; RV64FINX:       # %bb.0:
+; RV64FINX-NEXT:    addi sp, sp, -16
+; RV64FINX-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
+; RV64FINX-NEXT:    lui a1, %hi(gf)
+; RV64FINX-NEXT:    lw s0, %lo(gf)(a1)
+; RV64FINX-NEXT:    # kill: def $x10_w killed $x10_w def $x10
+; RV64FINX-NEXT:    #APP
+; RV64FINX-NEXT:    fadd.s t0, a0, s0
+; RV64FINX-NEXT:    #NO_APP
+; RV64FINX-NEXT:    mv a0, t0
+; RV64FINX-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64FINX-NEXT:    addi sp, sp, 16
+; RV64FINX-NEXT:    ret
+  %1 = load float, ptr @gf
+  %2 = tail call float asm "fadd.s $0, $1, $2", "={t0},{a0},{s0}"(float %a, float %1)
+  ret float %2
+}
+
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll
new file mode 100644
index 00000000000000..f9707c6c8995dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll
@@ -0,0 +1,158 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=ilp32 | FileCheck -check-prefix=RV32ZHINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefix=RV64ZHINX %s
+; RUN: llc -mtriple=riscv32 -mattr=+zdinx,+zhinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=ilp32 | FileCheck -check-prefix=RV32DINXZHINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx,+zhinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefix=RV64DINXZHINX %s
+
+ at gh = external global half
+
+define half @constraint_r_half(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_r_half:
+; RV32ZHINX:       # %bb.0:
+; RV32ZHINX-NEXT:    lui a1, %hi(gh)
+; RV32ZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV32ZHINX-NEXT:    #APP
+; RV32ZHINX-NEXT:    fadd.h a0, a0, a1
+; RV32ZHINX-NEXT:    #NO_APP
+; RV32ZHINX-NEXT:    ret
+;
+; RV64ZHINX-LABEL: constraint_r_half:
+; RV64ZHINX:       # %bb.0:
+; RV64ZHINX-NEXT:    lui a1, %hi(gh)
+; RV64ZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV64ZHINX-NEXT:    #APP
+; RV64ZHINX-NEXT:    fadd.h a0, a0, a1
+; RV64ZHINX-NEXT:    #NO_APP
+; RV64ZHINX-NEXT:    ret
+;
+; RV32DINXZHINX-LABEL: constraint_r_half:
+; RV32DINXZHINX:       # %bb.0:
+; RV32DINXZHINX-NEXT:    lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT:    #APP
+; RV32DINXZHINX-NEXT:    fadd.h a0, a0, a1
+; RV32DINXZHINX-NEXT:    #NO_APP
+; RV32DINXZHINX-NEXT:    ret
+;
+; RV64DINXZHINX-LABEL: constraint_r_half:
+; RV64DINXZHINX:       # %bb.0:
+; RV64DINXZHINX-NEXT:    lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT:    #APP
+; RV64DINXZHINX-NEXT:    fadd.h a0, a0, a1
+; RV64DINXZHINX-NEXT:    #NO_APP
+; RV64DINXZHINX-NEXT:    ret
+  %1 = load half, ptr @gh
+  %2 = tail call half asm "fadd.h $0, $1, $2", "=r,r,r"(half %a, half %1)
+  ret half %2
+}
+
+define half @constraint_cr_half(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_cr_half:
+; RV32ZHINX:       # %bb.0:
+; RV32ZHINX-NEXT:    lui a1, %hi(gh)
+; RV32ZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV32ZHINX-NEXT:    #APP
+; RV32ZHINX-NEXT:    fadd.h a0, a0, a1
+; RV32ZHINX-NEXT:    #NO_APP
+; RV32ZHINX-NEXT:    ret
+;
+; RV64ZHINX-LABEL: constraint_cr_half:
+; RV64ZHINX:       # %bb.0:
+; RV64ZHINX-NEXT:    lui a1, %hi(gh)
+; RV64ZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV64ZHINX-NEXT:    #APP
+; RV64ZHINX-NEXT:    fadd.h a0, a0, a1
+; RV64ZHINX-NEXT:    #NO_APP
+; RV64ZHINX-NEXT:    ret
+;
+; RV32DINXZHINX-LABEL: constraint_cr_half:
+; RV32DINXZHINX:       # %bb.0:
+; RV32DINXZHINX-NEXT:    lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT:    #APP
+; RV32DINXZHINX-NEXT:    fadd.h a0, a0, a1
+; RV32DINXZHINX-NEXT:    #NO_APP
+; RV32DINXZHINX-NEXT:    ret
+;
+; RV64DINXZHINX-LABEL: constraint_cr_half:
+; RV64DINXZHINX:       # %bb.0:
+; RV64DINXZHINX-NEXT:    lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT:    lh a1, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT:    #APP
+; RV64DINXZHINX-NEXT:    fadd.h a0, a0, a1
+; RV64DINXZHINX-NEXT:    #NO_APP
+; RV64DINXZHINX-NEXT:    ret
+  %1 = load half, ptr @gh
+  %2 = tail call half asm "fadd.h $0, $1, $2", "=^cr,^cr,^cr"(half %a, half %1)
+  ret half %2
+}
+
+define half @constraint_half_abi_name(half %a) nounwind {
+; RV32ZHINX-LABEL: constraint_half_abi_name:
+; RV32ZHINX:       # %bb.0:
+; RV32ZHINX-NEXT:    addi sp, sp, -16
+; RV32ZHINX-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32ZHINX-NEXT:    lui a1, %hi(gh)
+; RV32ZHINX-NEXT:    lh s0, %lo(gh)(a1)
+; RV32ZHINX-NEXT:    # kill: def $x10_h killed $x10_h def $x10
+; RV32ZHINX-NEXT:    #APP
+; RV32ZHINX-NEXT:    fadd.s t0, a0, s0
+; RV32ZHINX-NEXT:    #NO_APP
+; RV32ZHINX-NEXT:    mv a0, t0
+; RV32ZHINX-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32ZHINX-NEXT:    addi sp, sp, 16
+; RV32ZHINX-NEXT:    ret
+;
+; RV64ZHINX-LABEL: constraint_half_abi_name:
+; RV64ZHINX:       # %bb.0:
+; RV64ZHINX-NEXT:    addi sp, sp, -16
+; RV64ZHINX-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
+; RV64ZHINX-NEXT:    lui a1, %hi(gh)
+; RV64ZHINX-NEXT:    lh s0, %lo(gh)(a1)
+; RV64ZHINX-NEXT:    # kill: def $x10_h killed $x10_h def $x10
+; RV64ZHINX-NEXT:    #APP
+; RV64ZHINX-NEXT:    fadd.s t0, a0, s0
+; RV64ZHINX-NEXT:    #NO_APP
+; RV64ZHINX-NEXT:    mv a0, t0
+; RV64ZHINX-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64ZHINX-NEXT:    addi sp, sp, 16
+; RV64ZHINX-NEXT:    ret
+;
+; RV32DINXZHINX-LABEL: constraint_half_abi_name:
+; RV32DINXZHINX:       # %bb.0:
+; RV32DINXZHINX-NEXT:    addi sp, sp, -16
+; RV32DINXZHINX-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; RV32DINXZHINX-NEXT:    lui a1, %hi(gh)
+; RV32DINXZHINX-NEXT:    lh s0, %lo(gh)(a1)
+; RV32DINXZHINX-NEXT:    # kill: def $x10_h killed $x10_h def $x10
+; RV32DINXZHINX-NEXT:    #APP
+; RV32DINXZHINX-NEXT:    fadd.s t0, a0, s0
+; RV32DINXZHINX-NEXT:    #NO_APP
+; RV32DINXZHINX-NEXT:    mv a0, t0
+; RV32DINXZHINX-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; RV32DINXZHINX-NEXT:    addi sp, sp, 16
+; RV32DINXZHINX-NEXT:    ret
+;
+; RV64DINXZHINX-LABEL: constraint_half_abi_name:
+; RV64DINXZHINX:       # %bb.0:
+; RV64DINXZHINX-NEXT:    addi sp, sp, -16
+; RV64DINXZHINX-NEXT:    sd s0, 8(sp) # 8-byte Folded Spill
+; RV64DINXZHINX-NEXT:    lui a1, %hi(gh)
+; RV64DINXZHINX-NEXT:    lh s0, %lo(gh)(a1)
+; RV64DINXZHINX-NEXT:    # kill: def $x10_h killed $x10_h def $x10
+; RV64DINXZHINX-NEXT:    #APP
+; RV64DINXZHINX-NEXT:    fadd.s t0, a0, s0
+; RV64DINXZHINX-NEXT:    #NO_APP
+; RV64DINXZHINX-NEXT:    mv a0, t0
+; RV64DINXZHINX-NEXT:    ld s0, 8(sp) # 8-byte Folded Reload
+; RV64DINXZHINX-NEXT:    addi sp, sp, 16
+; RV64DINXZHINX-NEXT:    ret
+  %1 = load half, ptr @gh
+  %2 = tail call half asm "fadd.s $0, $1, $2", "={t0},{a0},{s0}"(half %a, half %1)
+  ret half %2
+}
diff --git a/llvm/test/CodeGen/RISCV/inline-asm.ll b/llvm/test/CodeGen/RISCV/inline-asm.ll
index cb240d2dc68d0c..79266743a1d051 100644
--- a/llvm/test/CodeGen/RISCV/inline-asm.ll
+++ b/llvm/test/CodeGen/RISCV/inline-asm.ll
@@ -56,6 +56,29 @@ define i32 @constraint_r_zero(i32 %a) nounwind {
   ret i32 %2
 }
 
+define i32 @constraint_cr(i32 %a) nounwind {
+; RV32I-LABEL: constraint_cr:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, %hi(gi)
+; RV32I-NEXT:    lw a1, %lo(gi)(a1)
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    c.add a0, a0, a1
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: constraint_cr:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    lui a1, %hi(gi)
+; RV64I-NEXT:    lw a1, %lo(gi)(a1)
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    c.add a0, a0, a1
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ret
+  %1 = load i32, ptr @gi
+  %2 = tail call i32 asm "c.add $0, $1, $2", "=^cr,0,^cr"(i32 %a, i32 %1)
+  ret i32 %2
+}
+
 define i32 @constraint_i(i32 %a) nounwind {
 ; RV32I-LABEL: constraint_i:
 ; RV32I:       # %bb.0:
@@ -215,6 +238,49 @@ define i32 @modifier_i_reg(i32 %a, i32 %b) nounwind {
   ret i32 %1
 }
 
+;; `.insn 0x4, 0x33 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)` is the
+;; raw encoding of `add`
+
+define i32 @modifier_N_reg(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: modifier_N_reg:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    .insn 0x4, 0x33 | (10 << 7) | (10 << 15) | (11 << 20)
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: modifier_N_reg:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    .insn 0x4, 0x33 | (10 << 7) | (10 << 15) | (11 << 20)
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ret
+  %1 = tail call i32 asm ".insn 0x4, 0x33 | (${0:N} << 7) | (${1:N} << 15) | (${2:N} << 20)", "=r,r,r"(i32 %a, i32 %b)
+  ret i32 %1
+}
+
+;; `.insn 0x2, 0x9422 | (${0:N} << 7) | (${2:N} << 2)` is the raw encoding of
+;; `c.add` (note the constraint that the first input should be the same as the
+;; output).
+
+define i32 @modifier_N_with_cr_reg(i32 %a, i32 %b) nounwind {
+; RV32I-LABEL: modifier_N_with_cr_reg:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    .insn 0x2, 0x9422 | (10 << 7) | (11 << 2)
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: modifier_N_with_cr_reg:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    .insn 0x2, 0x9422 | (10 << 7) | (11 << 2)
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ret
+  %1 = tail call i32 asm ".insn 0x2, 0x9422 | (${0:N} << 7) | (${2:N} << 2)", "=^cr,0,^cr"(i32 %a, i32 %b)
+  ret i32 %1
+}
+
 define void @operand_global() nounwind {
 ; RV32I-LABEL: operand_global:
 ; RV32I:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
index 95695aa697764a..18bd41a210f53f 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
@@ -1,6 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
 ; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
 ; RUN:   -target-abi=ilp32 -mattr=+zhinx | FileCheck %s
+
+;; These tests cover the use of `r` and `cr` constraints for floating point values on rv32.
+;;
+;; In particular, there is significant complexity around using paired GPRs for double values on rv32.
+
 define dso_local void @zdinx_asm(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
 ; CHECK-LABEL: zdinx_asm:
 ; CHECK:       # %bb.0: # %entry
@@ -50,3 +55,59 @@ entry:
   store half %0, ptr %arrayidx, align 8
   ret void
 }
+
+define dso_local void @zdinx_asm_cr(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
+; CHECK-LABEL: zdinx_asm_cr:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
+; CHECK-NEXT:    mv a5, a4
+; CHECK-NEXT:    mv s1, a2
+; CHECK-NEXT:    mv a4, a3
+; CHECK-NEXT:    mv s0, a1
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    fsgnjx.d a2, s0, a4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    sw a2, 8(a0)
+; CHECK-NEXT:    sw a3, 12(a0)
+; CHECK-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+entry:
+  %arrayidx = getelementptr inbounds double, ptr %a, i32 1
+  %0 = tail call double asm "fsgnjx.d $0, $1, $2", "=^cr,^cr,^cr"(double %b, double %c)
+  store double %0, ptr %arrayidx, align 8
+  ret void
+}
+
+define dso_local void @zfinx_asm_cr(ptr nocapture noundef writeonly %a, float noundef %b, float noundef %c) nounwind {
+; CHECK-LABEL: zfinx_asm_cr:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    fsgnjx.s a1, a1, a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    sw a1, 4(a0)
+; CHECK-NEXT:    ret
+entry:
+  %arrayidx = getelementptr inbounds float, ptr %a, i32 1
+  %0 = tail call float asm "fsgnjx.s $0, $1, $2", "=^cr,^cr,^cr"(float %b, float %c)
+  store float %0, ptr %arrayidx, align 8
+  ret void
+}
+
+define dso_local void @zhinx_asm_cr(ptr nocapture noundef writeonly %a, half noundef %b, half noundef %c) nounwind {
+; CHECK-LABEL: zhinx_asm_cr:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    fsgnjx.h a1, a1, a2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    sh a1, 2(a0)
+; CHECK-NEXT:    ret
+entry:
+  %arrayidx = getelementptr inbounds half, ptr %a, i32 1
+  %0 = tail call half asm "fsgnjx.h $0, $1, $2", "=^cr,^cr,^cr"(half %b, half %c)
+  store half %0, ptr %arrayidx, align 8
+  ret void
+}

From 99a232d48e9a978fc18035a67fd00df17e092260 Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Wed, 16 Oct 2024 07:55:58 -0700
Subject: [PATCH 2/2] [RISCV][ISel] Ensure 'in X' Constraints prevent X0

I'm not sure whether this fix is required, but I've written the patch
anyway; we can drop this commit if we decide this isn't a bug. It does
not cause any test changes, but we don't currently have tests that try
to use all 32 registers in inline assembly.

Broadly, for GPRs, we made the explicit choice that `r` constraints
would never attempt to use `x0`, because `x0` isn't really usable like
the other GPRs. I believe the same applies to `Zhinx`, `Zfinx` and
`Zdinx`: their operands should not be allocated to `x0` either. This
patch therefore adds a `GPRF16NoX0` register class (mirroring the
existing `GPRF32NoX0`) and switches the inline-assembly lowering for
these constraints over to the `NoX0` classes. It also adds a
`GPRPairNoX0` class for the `Zdinx` case on rv32, avoiding the `x0`
pair, which behaves differently from the other GPR pairs.
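
To illustrate the concern (a hypothetical sketch, not a test from this
patch, assuming a target with `Zfinx` so that float operands live in
GPRs):

    /* Hypothetical sketch: if the allocator could pick x0 for %0 here,
       the fadd.s result would be silently discarded, since writes to x0
       are ignored; the NoX0 classes rule that out. */
    float zfinx_fadd(float a, float b) {
      float ret;
      asm("fadd.s %0, %1, %2" : "=r"(ret) : "r"(a), "r"(b));
      return ret;
    }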
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 6 +++---
 llvm/lib/Target/RISCV/RISCVRegisterInfo.td  | 3 +++
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index aa34676d2b2a61..ad777745fda313 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -20248,11 +20248,11 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       if (VT.isVector())
         break;
       if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
-        return std::make_pair(0U, &RISCV::GPRF16RegClass);
+        return std::make_pair(0U, &RISCV::GPRF16NoX0RegClass);
       if (VT == MVT::f32 && Subtarget.hasStdExtZfinx())
-        return std::make_pair(0U, &RISCV::GPRF32RegClass);
+        return std::make_pair(0U, &RISCV::GPRF32NoX0RegClass);
       if (VT == MVT::f64 && Subtarget.hasStdExtZdinx() && !Subtarget.is64Bit())
-        return std::make_pair(0U, &RISCV::GPRPairRegClass);
+        return std::make_pair(0U, &RISCV::GPRPairNoX0RegClass);
       return std::make_pair(0U, &RISCV::GPRNoX0RegClass);
     case 'f':
       if (Subtarget.hasStdExtZfhmin() && VT == MVT::f16)
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 250f3c10f309bf..685f04213afa86 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -661,6 +661,7 @@ def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
                                                 (sequence "X%u_H", 0, 4))>;
 def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
                                                  (sequence "X%u_H", 8, 9))>;
+def GPRF16NoX0 : RISCVRegisterClass<[f16], 16, (sub GPRF16, X0_H)>;
 
 def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
                                                 (sequence "X%u_W", 5, 7),
@@ -721,6 +722,8 @@ def GPRPair : RISCVRegisterClass<[XLenPairFVT], 64, (add
 def GPRPairC : RISCVRegisterClass<[XLenPairFVT], 64, (add
   X10_X11, X12_X13, X14_X15, X8_X9
 )>;
+
+def GPRPairNoX0 : RISCVRegisterClass<[XLenPairFVT], 64, (sub GPRPair, X0_Pair)>;
 } // let RegInfos = XLenPairRI, DecoderMethod = "DecodeGPRPairRegisterClass"
 
 // The register class is added for inline assembly for vector mask types.


