[llvm] 1c3a667 - Recommit "[RISCV] Add intrinsics for vfmv.f.s and vfmv.s.f"

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 18 11:21:57 PST 2020


Author: Craig Topper
Date: 2020-12-18T11:19:05-08:00
New Revision: 1c3a6671c25ea72dcc241fde9313daa31d320fed

URL: https://github.com/llvm/llvm-project/commit/1c3a6671c25ea72dcc241fde9313daa31d320fed
DIFF: https://github.com/llvm/llvm-project/commit/1c3a6671c25ea72dcc241fde9313daa31d320fed.diff

LOG: Recommit "[RISCV] Add intrinsics for vfmv.f.s and vfmv.s.f"

This time with tests.

Original message:
Similar to D93365, but for floating point. No special ISD opcodes are
needed, though; we can isel these directly from the intrinsics. I had to
use anyfloat_ty instead of anyvector_ty in the intrinsics so that
LLVMVectorElementType would not crash when imported into the
-gen-dag-isel tablegen backend.

Differential Revision: https://reviews.llvm.org/D93426
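
For illustration, a minimal sketch of how the new intrinsics are called
from LLVM IR. The declarations match those added in the tests below; the
nxv2f32 element type, the function names, and the use of i64 for the VL
operand (the riscv32 tests use i32) are illustrative choices only.

  declare float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float>)
  declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i64)

  ; vfmv.f.s: read element 0 of the source vector into an FP scalar.
  define float @read_first_elt(<vscale x 2 x float> %v) nounwind {
  entry:
    %e = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> %v)
    ret float %e
  }

  ; vfmv.s.f: write the FP scalar %s into element 0 of %v; the trailing
  ; operand is the vector length to use for the operation.
  define <vscale x 2 x float> @write_first_elt(<vscale x 2 x float> %v, float %s, i64 %vl) nounwind {
  entry:
    %r = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %v, float %s, i64 %vl)
    ret <vscale x 2 x float> %r
  }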

Added: 
    llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index c6f3a492683e..3a089e0762f1 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -349,4 +349,12 @@ let TargetPrefix = "riscv" in {
                                     [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 2;
   }
+
+  def int_riscv_vfmv_f_s : Intrinsic<[LLVMVectorElementType<0>],
+                                     [llvm_anyfloat_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
+  def int_riscv_vfmv_s_f : Intrinsic<[llvm_anyfloat_ty],
+                                     [LLVMMatchType<0>, LLVMVectorElementType<0>,
+                                      llvm_anyint_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
 } // TargetPrefix = "riscv"

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 3c587636144f..677d9f392bb3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -980,8 +980,9 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 in {
 // Floating-Point Scalar Move Instructions
 def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                       (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">;
-def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd),
-                      (ins FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
+let Constraints = "$vd = $vd_wb" in
+def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
+                      (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;
 
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
 } // Predicates = [HasStdExtV, HasStdExtF]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4927b82ec661..92340785d861 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1370,12 +1370,37 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
           Constraints = "$rd = $rs1" in
       def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
                                              (ins m.vrclass:$rs1, GPR:$rs2,
-                                             GPR:$vl, ixlenimm:$sew),
+                                                  GPR:$vl, ixlenimm:$sew),
                                              []>, RISCVVPseudo;
     }
   }
 }
+} // Predicates = [HasStdExtV]
+
+//===----------------------------------------------------------------------===//
+// 17.2. Floating-Point Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+let mayLoad = 0, mayStore = 0, hasSideEffects = 0, usesCustomInserter = 1,
+    Uses = [VL, VTYPE] in {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      let SEWIndex = 2, BaseInstr = VFMV_F_S in
+      def PseudoVFMV_F_S # "_" # m.MX : Pseudo<(outs FPR32:$rd),
+                                               (ins m.vrclass:$rs2,
+                                                    ixlenimm:$sew),
+                                               []>, RISCVVPseudo;
+      let VLIndex = 3, SEWIndex = 4, BaseInstr = VFMV_S_F,
+          Constraints = "$rd = $rs1" in
+      def PseudoVFMV_S_F # "_" # m.MX : Pseudo<(outs m.vrclass:$rd),
+                                               (ins m.vrclass:$rs1, FPR32:$rs2,
+                                                    GPR:$vl, ixlenimm:$sew),
+                                               []>, RISCVVPseudo;
+    }
+  }
 }
+} // Predicates = [HasStdExtV, HasStdExtF]
 
 //===----------------------------------------------------------------------===//
 // Patterns.
@@ -1557,3 +1582,34 @@ foreach vti = AllIntegerVectors in {
              (vti.Vector $rs1), $rs2, (NoX0 GPR:$vl), vti.SEW)>;
 }
 } // Predicates = [HasStdExtV]
+
+//===----------------------------------------------------------------------===//
+// 17.2. Floating-Point Scalar Move Instructions
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+foreach fvti = AllFloatVectors in {
+  defvar instr = !cast<Instruction>("PseudoVFMV_F_S_" # fvti.LMul.MX);
+  def : Pat<(fvti.Scalar (int_riscv_vfmv_f_s (fvti.Vector fvti.RegClass:$rs2))),
+             // Floating point instructions with a scalar result will always
+             // generate the result in a register of class FPR32. When dealing
+             // with the f64 variant of a pattern we need to promote the FPR32
+             // subregister generated by the instruction to the FPR64 base
+             // register expected by the type in the pattern
+             !cond(!eq(!cast<string>(fvti.ScalarRegClass),
+                       !cast<string>(FPR64)):
+                      (SUBREG_TO_REG (i32 -1),
+                                     (instr $rs2, fvti.SEW), sub_32),
+                   !eq(!cast<string>(fvti.ScalarRegClass),
+                       !cast<string>(FPR16)):
+                      (EXTRACT_SUBREG (instr $rs2, fvti.SEW), sub_16),
+                   !eq(1, 1):
+                      (instr $rs2, fvti.SEW))>;
+
+  def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
+                         (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVFMV_S_F_" # fvti.LMul.MX)
+             (fvti.Vector $rs1), ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+             (NoX0 GPR:$vl), fvti.SEW)>;
+}
+} // Predicates = [HasStdExtV, HasStdExtF]

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
new file mode 100644
index 000000000000..18ca597f06be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-v,+experimental-zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-v,+experimental-zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv2f16(<vscale x 2 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv4f16(<vscale x 4 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv8f16(<vscale x 8 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv16f16(<vscale x 16 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half> %0)
+  ret half %a
+}
+
+declare half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half>)
+
+define half @intrinsic_vfmv.f.s_s_nxv32f16(<vscale x 32 x half> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h killed $f10_f
+; CHECK-NEXT:    ret
+entry:
+  %a = call half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half> %0)
+  ret half %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv2f32(<vscale x 2 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv4f32(<vscale x 4 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv8f32(<vscale x 8 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> %0)
+  ret float %a
+}
+
+declare float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float>)
+
+define float @intrinsic_vfmv.f.s_s_nxv16f32(<vscale x 16 x float> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float> %0)
+  ret float %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv2f64(<vscale x 2 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv4f64(<vscale x 4 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> %0)
+  ret double %a
+}
+
+declare double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double>)
+
+define double @intrinsic_vfmv.f.s_s_nxv8f64(<vscale x 8 x double> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.f.s fa0, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> %0)
+  ret double %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
new file mode 100644
index 000000000000..ecd5daaab8f9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv32.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-v,+experimental-zfh -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i32)
+
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i32)
+
+define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i32)
+
+define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i32)
+
+define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i32)
+
+define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i32)
+
+define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2)
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i32)
+
+define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i32)
+
+define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i32)
+
+define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i32)
+
+define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i32)
+
+define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i32)
+
+define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i32)
+
+define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i32)
+
+define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i32)
+
+define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2)
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
new file mode 100644
index 000000000000..635218d2689b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f-rv64.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-v,+experimental-zfh -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, i64)
+
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, i64)
+
+define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2)
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, i64)
+
+define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2)
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, i64)
+
+define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2)
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, i64)
+
+define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2)
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, i64)
+
+define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $f10_h killed $f10_h def $f10_f
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2)
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, i64)
+
+define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2)
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, i64)
+
+define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2)
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, i64)
+
+define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2)
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, i64)
+
+define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2)
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, i64)
+
+define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2)
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, i64)
+
+define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2)
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, i64)
+
+define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2)
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, i64)
+
+define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2)
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, i64)
+
+define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.s.f v16, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2)
+  ret <vscale x 8 x double> %a
+}
