[llvm] 946d29e - [RISCV] Support vector type strict_fsqrt.

Yeting Kuo via llvm-commits <llvm-commits@lists.llvm.org>
Sun Mar 26 23:02:29 PDT 2023


Author: Yeting Kuo
Date: 2023-03-27T14:02:22+08:00
New Revision: 946d29e7e9020ca6b382d9d985116cfa22d8569d

URL: https://github.com/llvm/llvm-project/commit/946d29e7e9020ca6b382d9d985116cfa22d8569d
DIFF: https://github.com/llvm/llvm-project/commit/946d29e7e9020ca6b382d9d985116cfa22d8569d.diff

LOG: [RISCV] Support vector type strict_fsqrt.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D146911
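
For reference, a representative case from the new tests: a constrained
sqrt intrinsic on a scalable vector type now selects vfsqrt.v directly.

  declare <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double>, metadata, metadata)

  define <vscale x 1 x double> @vfsqrt_nxv1f64(<vscale x 1 x double> %v) {
    %r = call <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
    ret <vscale x 1 x double> %r
  }

  ; expected lowering (from vfsqrt-constrained-sdnode.ll):
  ;   vsetvli a0, zero, e64, m1, ta, ma
  ;   vfsqrt.v v8, v8
  ;   ret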

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fdc69ed30cbda..e01f02e35f2ed 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -806,7 +806,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
       setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
       setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
-                          ISD::STRICT_FDIV},
+                          ISD::STRICT_FDIV, ISD::STRICT_FSQRT},
                          VT, Legal);
     };
 
@@ -1023,7 +1023,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
         setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
         setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB,
-                            ISD::STRICT_FMUL, ISD::STRICT_FDIV},
+                            ISD::STRICT_FMUL, ISD::STRICT_FDIV,
+                            ISD::STRICT_FSQRT},
                            VT, Custom);
       }
 
@@ -4503,6 +4504,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::STRICT_FDIV:
     return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FDIV_VL,
                              /*HasMergeOp*/ true);
+  case ISD::STRICT_FSQRT:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FSQRT_VL);
   case ISD::MGATHER:
   case ISD::VP_GATHER:
     return lowerMaskedGather(Op, DAG);
@@ -14098,6 +14101,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(STRICT_FSUB_VL)
   NODE_NAME_CASE(STRICT_FMUL_VL)
   NODE_NAME_CASE(STRICT_FDIV_VL)
+  NODE_NAME_CASE(STRICT_FSQRT_VL)
   NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
   NODE_NAME_CASE(VWMUL_VL)
   NODE_NAME_CASE(VWMULU_VL)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 19aaebc92ba6a..4a50447055889 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -334,6 +334,7 @@ enum NodeType : unsigned {
   STRICT_FSUB_VL,
   STRICT_FMUL_VL,
   STRICT_FDIV_VL,
+  STRICT_FSQRT_VL,
   STRICT_FP_EXTEND_VL,
 
   // WARNING: Do not add anything in the end unless you want the node to

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 95170d367e461..ed76f8182789d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1005,7 +1005,7 @@ defm : VPatWidenFPNegMulSacSDNode_VV_VF<"PseudoVFWNMSAC">;
 
 foreach vti = AllFloatVectors in {
   // 13.8. Vector Floating-Point Square-Root Instruction
-  def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)),
+  def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
                  vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 43ca9259ef2a8..0dabf8d10782d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -111,6 +111,7 @@ def riscv_strict_fadd_vl  : SDNode<"RISCVISD::STRICT_FADD_VL",  SDT_RISCVFPBinOp
 def riscv_strict_fsub_vl  : SDNode<"RISCVISD::STRICT_FSUB_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
 def riscv_strict_fmul_vl  : SDNode<"RISCVISD::STRICT_FMUL_VL",  SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
 def riscv_strict_fdiv_vl  : SDNode<"RISCVISD::STRICT_FDIV_VL",  SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
+def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;
 
 def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                         [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
@@ -124,6 +125,9 @@ def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$m
 def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                         [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                          (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
+                        [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
+                         (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;
 
 def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
@@ -1813,7 +1817,7 @@ defm : VPatFPSetCCVL_VV_VF_FV<SETOLE, "PseudoVMFLE", "PseudoVMFGE">;
 
 foreach vti = AllFloatVectors in {
   // 13.8. Vector Floating-Point Square-Root Instruction
-  def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
+  def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
                             VLOpFrag),
             (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
                  (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
new file mode 100644
index 0000000000000..080d4ae80b83e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
@@ -0,0 +1,150 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x half> @llvm.experimental.constrained.sqrt.v2f16(<2 x half>, metadata, metadata)
+
+define <2 x half> @vfsqrt_v2f16(<2 x half> %v) {
+; CHECK-LABEL: vfsqrt_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <2 x half> @llvm.experimental.constrained.sqrt.v2f16(<2 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <2 x half> %r
+}
+
+declare <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half>, metadata, metadata)
+
+define <4 x half> @vfsqrt_v4f16(<4 x half> %v) {
+; CHECK-LABEL: vfsqrt_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <4 x half> %r
+}
+
+declare <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half>, metadata, metadata)
+
+define <8 x half> @vfsqrt_v8f16(<8 x half> %v) {
+; CHECK-LABEL: vfsqrt_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <8 x half> %r
+}
+
+declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata)
+
+define <16 x half> @vfsqrt_v16f16(<16 x half> %v) {
+; CHECK-LABEL: vfsqrt_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <16 x half> %r
+}
+
+declare <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half>, metadata, metadata)
+
+define <32 x half> @vfsqrt_v32f16(<32 x half> %v) {
+; CHECK-LABEL: vfsqrt_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <32 x half> %r
+}
+
+declare <2 x float> @llvm.experimental.constrained.sqrt.v2f32(<2 x float>, metadata, metadata)
+
+define <2 x float> @vfsqrt_v2f32(<2 x float> %v) {
+; CHECK-LABEL: vfsqrt_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <2 x float> @llvm.experimental.constrained.sqrt.v2f32(<2 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <2 x float> %r
+}
+
+declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
+
+define <4 x float> @vfsqrt_v4f32(<4 x float> %v) {
+; CHECK-LABEL: vfsqrt_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <4 x float> %r
+}
+
+declare <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float>, metadata, metadata)
+
+define <8 x float> @vfsqrt_v8f32(<8 x float> %v) {
+; CHECK-LABEL: vfsqrt_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <8 x float> %r
+}
+
+declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata)
+
+define <16 x float> @vfsqrt_v16f32(<16 x float> %v) {
+; CHECK-LABEL: vfsqrt_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <16 x float> %r
+}
+
+declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
+
+define <2 x double> @vfsqrt_v2f64(<2 x double> %v) {
+; CHECK-LABEL: vfsqrt_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <2 x double> %r
+}
+
+declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
+
+define <4 x double> @vfsqrt_v4f64(<4 x double> %v) {
+; CHECK-LABEL: vfsqrt_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <4 x double> %r
+}
+
+declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata)
+
+define <8 x double> @vfsqrt_v8f64(<8 x double> %v) {
+; CHECK-LABEL: vfsqrt_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <8 x double> %r
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
new file mode 100644
index 0000000000000..f41ef9f1c7623
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.sqrt.nxv1f16(<vscale x 1 x half>, metadata, metadata)
+
+define <vscale x 1 x half> @vfsqrt_nxv1f16(<vscale x 1 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.experimental.constrained.sqrt.nxv1f16(<vscale x 1 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 1 x half> %r
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.sqrt.nxv2f16(<vscale x 2 x half>, metadata, metadata)
+
+define <vscale x 2 x half> @vfsqrt_nxv2f16(<vscale x 2 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.experimental.constrained.sqrt.nxv2f16(<vscale x 2 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 2 x half> %r
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.sqrt.nxv4f16(<vscale x 4 x half>, metadata, metadata)
+
+define <vscale x 4 x half> @vfsqrt_nxv4f16(<vscale x 4 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.experimental.constrained.sqrt.nxv4f16(<vscale x 4 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 4 x half> %r
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.sqrt.nxv8f16(<vscale x 8 x half>, metadata, metadata)
+
+define <vscale x 8 x half> @vfsqrt_nxv8f16(<vscale x 8 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.experimental.constrained.sqrt.nxv8f16(<vscale x 8 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 8 x half> %r
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.sqrt.nxv16f16(<vscale x 16 x half>, metadata, metadata)
+
+define <vscale x 16 x half> @vfsqrt_nxv16f16(<vscale x 16 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.experimental.constrained.sqrt.nxv16f16(<vscale x 16 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 16 x half> %r
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.sqrt.nxv32f16(<vscale x 32 x half>, metadata, metadata)
+
+define <vscale x 32 x half> @vfsqrt_nxv32f16(<vscale x 32 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.experimental.constrained.sqrt.nxv32f16(<vscale x 32 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 32 x half> %r
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.sqrt.nxv1f32(<vscale x 1 x float>, metadata, metadata)
+
+define <vscale x 1 x float> @vfsqrt_nxv1f32(<vscale x 1 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.experimental.constrained.sqrt.nxv1f32(<vscale x 1 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 1 x float> %r
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.sqrt.nxv2f32(<vscale x 2 x float>, metadata, metadata)
+
+define <vscale x 2 x float> @vfsqrt_nxv2f32(<vscale x 2 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.experimental.constrained.sqrt.nxv2f32(<vscale x 2 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 2 x float> %r
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.sqrt.nxv4f32(<vscale x 4 x float>, metadata, metadata)
+
+define <vscale x 4 x float> @vfsqrt_nxv4f32(<vscale x 4 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.experimental.constrained.sqrt.nxv4f32(<vscale x 4 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 4 x float> %r
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.sqrt.nxv8f32(<vscale x 8 x float>, metadata, metadata)
+
+define <vscale x 8 x float> @vfsqrt_nxv8f32(<vscale x 8 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.experimental.constrained.sqrt.nxv8f32(<vscale x 8 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 8 x float> %r
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.sqrt.nxv16f32(<vscale x 16 x float>, metadata, metadata)
+
+define <vscale x 16 x float> @vfsqrt_nxv16f32(<vscale x 16 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.experimental.constrained.sqrt.nxv16f32(<vscale x 16 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 16 x float> %r
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double>, metadata, metadata)
+
+define <vscale x 1 x double> @vfsqrt_nxv1f64(<vscale x 1 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 1 x double> %r
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.sqrt.nxv2f64(<vscale x 2 x double>, metadata, metadata)
+
+define <vscale x 2 x double> @vfsqrt_nxv2f64(<vscale x 2 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.experimental.constrained.sqrt.nxv2f64(<vscale x 2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 2 x double> %r
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.sqrt.nxv4f64(<vscale x 4 x double>, metadata, metadata)
+
+define <vscale x 4 x double> @vfsqrt_nxv4f64(<vscale x 4 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.experimental.constrained.sqrt.nxv4f64(<vscale x 4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 4 x double> %r
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.sqrt.nxv8f64(<vscale x 8 x double>, metadata, metadata)
+
+define <vscale x 8 x double> @vfsqrt_nxv8f64(<vscale x 8 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.experimental.constrained.sqrt.nxv8f64(<vscale x 8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 8 x double> %r
+}
