[llvm] b870199 - [RISCV] Add patterns for scalable-vector fabs & fcopysign

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 16 02:27:19 PST 2021


Author: Fraser Cormack
Date: 2021-02-16T10:21:09Z
New Revision: b8701990202cd305236401f76331d53e65953ef2

URL: https://github.com/llvm/llvm-project/commit/b8701990202cd305236401f76331d53e65953ef2
DIFF: https://github.com/llvm/llvm-project/commit/b8701990202cd305236401f76331d53e65953ef2.diff

LOG: [RISCV] Add patterns for scalable-vector fabs & fcopysign

The patterns mostly follow their scalar counterparts, save for some extra
optimizations to match the vector/scalar forms.
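
As a minimal sketch drawn from the new tests below, a copysign whose second
operand is a splat of a scalar selects the vector/scalar form directly:

  %head = insertelement <vscale x 1 x half> undef, half %s, i32 0
  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %splat)
  ; selected as: vfsgnj.vf v8, v8, fa0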

The patch adds a DAGCombine for ISD::FCOPYSIGN which tries to reorder
ISD::FNEG around any ISD::FP_EXTEND or ISD::FP_TRUNC of the second
operand. This helps us achieve better codegen by matching vfsgnjn.
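
For example (see the vfcopynsign_exttrunc tests below), a negated
higher-precision operand is narrowed before the copysign; the combine hoists
the fneg above the fptrunc so the sign-injection can absorb it:

  %n = fneg <vscale x 1 x float> %vs
  %eneg = fptrunc <vscale x 1 x float> %n to <vscale x 1 x half>
  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
  ; combines to copysign(%vm, fneg (fptrunc %vs)) and selects:
  ;   vfncvt.f.f.w v25, v9
  ;   vfsgnjn.vv   v8, v8, v25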

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D96028

Added: 
    llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2a9ea89d4655..e77224648bfc 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -497,6 +497,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+      setOperationAction(ISD::FCOPYSIGN, VT, Legal);
     };
 
     if (Subtarget.hasStdExtZfh())
@@ -604,6 +605,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasStdExtZbp()) {
     setTargetDAGCombine(ISD::OR);
   }
+  if (Subtarget.hasStdExtV())
+    setTargetDAGCombine(ISD::FCOPYSIGN);
 }
 
 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
@@ -2966,6 +2969,30 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     }
     break;
   }
+  case ISD::FCOPYSIGN: {
+    EVT VT = N->getValueType(0);
+    if (!VT.isVector())
+      break;
+    // There is a form of VFSGNJ which injects the negated sign of its second
+    // operand. Try to bubble any FNEG up after the extend/round to produce
+    // this optimized pattern. Avoid modifying cases where the FP_ROUND's
+    // TRUNC flag is set to 1.
+    SDValue In2 = N->getOperand(1);
+    // Avoid cases where the extend/round has multiple uses, as duplicating
+    // those is typically more expensive than removing a fneg.
+    if (!In2.hasOneUse())
+      break;
+    if (In2.getOpcode() != ISD::FP_EXTEND &&
+        (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
+      break;
+    In2 = In2.getOperand(0);
+    if (In2.getOpcode() != ISD::FNEG)
+      break;
+    SDLoc DL(N);
+    SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
+    return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
+                       DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
+  }
   }
 
   return SDValue();

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 7424910a74eb..2c845af85c8f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -642,11 +642,32 @@ foreach vti = AllFloatVectors in {
             (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX)
                  vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
 
-  // 14.10. Vector Floating-Point Sign-Injection Instructions
+  // 14.12. Vector Floating-Point Sign-Injection Instructions
+  def : Pat<(fabs (vti.Vector vti.RegClass:$rs)),
+            (!cast<Instruction>("PseudoVFSGNJX_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
   // Handle fneg with VFSGNJN using the same input for both operands.
   def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
             (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
                  vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
+
+  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
+                                   (vti.Vector vti.RegClass:$rs2))),
+            (!cast<Instruction>("PseudoVFSGNJ_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
+  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
+                                   (vti.Vector (splat_vector vti.ScalarRegClass:$rs2)))),
+            (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.SEW)>;
+
+  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
+                                   (vti.Vector (fneg vti.RegClass:$rs2)))),
+            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.SEW)>;
+  def : Pat<(vti.Vector (fcopysign (vti.Vector vti.RegClass:$rs1),
+                                   (vti.Vector (fneg (splat_vector vti.ScalarRegClass:$rs2))))),
+            (!cast<Instruction>("PseudoVFSGNJN_V"#vti.ScalarSuffix#"_"#vti.LMul.MX)
+                 vti.RegClass:$rs1, vti.ScalarRegClass:$rs2, vti.AVL, vti.SEW)>;
 }
 
 // 14.11. Vector Floating-Point Compare Instructions

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
new file mode 100644
index 000000000000..314a7574f2df
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 1 x half> @vfabs_nxv1f16(<vscale x 1 x half> %v) {
+; CHECK-LABEL: vfabs_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half> %v)
+  ret <vscale x 1 x half> %r
+}
+
+declare <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 2 x half> @vfabs_nxv2f16(<vscale x 2 x half> %v) {
+; CHECK-LABEL: vfabs_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %v)
+  ret <vscale x 2 x half> %r
+}
+
+declare <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 4 x half> @vfabs_nxv4f16(<vscale x 4 x half> %v) {
+; CHECK-LABEL: vfabs_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %v)
+  ret <vscale x 4 x half> %r
+}
+
+declare <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 8 x half> @vfabs_nxv8f16(<vscale x 8 x half> %v) {
+; CHECK-LABEL: vfabs_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %v)
+  ret <vscale x 8 x half> %r
+}
+
+declare <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 16 x half> @vfabs_nxv16f16(<vscale x 16 x half> %v) {
+; CHECK-LABEL: vfabs_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> %v)
+  ret <vscale x 16 x half> %r
+}
+
+declare <vscale x 32 x half> @llvm.fabs.nxv32f16(<vscale x 32 x half>)
+
+define <vscale x 32 x half> @vfabs_nxv32f16(<vscale x 32 x half> %v) {
+; CHECK-LABEL: vfabs_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.fabs.nxv32f16(<vscale x 32 x half> %v)
+  ret <vscale x 32 x half> %r
+}
+
+declare <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 1 x float> @vfabs_nxv1f32(<vscale x 1 x float> %v) {
+; CHECK-LABEL: vfabs_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> %v)
+  ret <vscale x 1 x float> %r
+}
+
+declare <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 2 x float> @vfabs_nxv2f32(<vscale x 2 x float> %v) {
+; CHECK-LABEL: vfabs_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %v)
+  ret <vscale x 2 x float> %r
+}
+
+declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 4 x float> @vfabs_nxv4f32(<vscale x 4 x float> %v) {
+; CHECK-LABEL: vfabs_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %v)
+  ret <vscale x 4 x float> %r
+}
+
+declare <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 8 x float> @vfabs_nxv8f32(<vscale x 8 x float> %v) {
+; CHECK-LABEL: vfabs_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> %v)
+  ret <vscale x 8 x float> %r
+}
+
+declare <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 16 x float> @vfabs_nxv16f32(<vscale x 16 x float> %v) {
+; CHECK-LABEL: vfabs_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> %v)
+  ret <vscale x 16 x float> %r
+}
+
+declare <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 1 x double> @vfabs_nxv1f64(<vscale x 1 x double> %v) {
+; CHECK-LABEL: vfabs_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> %v)
+  ret <vscale x 1 x double> %r
+}
+
+declare <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 2 x double> @vfabs_nxv2f64(<vscale x 2 x double> %v) {
+; CHECK-LABEL: vfabs_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %v)
+  ret <vscale x 2 x double> %r
+}
+
+declare <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 4 x double> @vfabs_nxv4f64(<vscale x 4 x double> %v) {
+; CHECK-LABEL: vfabs_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %v)
+  ret <vscale x 4 x double> %r
+}
+
+declare <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 8 x double> @vfabs_nxv8f64(<vscale x 8 x double> %v) {
+; CHECK-LABEL: vfabs_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjx.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> %v)
+  ret <vscale x 8 x double> %r
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
new file mode 100644
index 000000000000..9c7f426e5022
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
@@ -0,0 +1,1465 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>)
+
+define <vscale x 1 x half> @vfcopysign_vv_nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %vs)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopysign_vf_nxv1f16(<vscale x 1 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %splat)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopynsign_vv_nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x half> %vs
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %n)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopynsign_vf_nxv1f16(<vscale x 1 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x half> %splat
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %n)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopysign_exttrunc_vv_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, <vscale x 1 x float> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v25, v9
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %e = fptrunc <vscale x 1 x float> %vs to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %e)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopysign_exttrunc_vf_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v25
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %esplat = fptrunc <vscale x 1 x float> %splat to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %esplat)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopynsign_exttrunc_vv_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, <vscale x 1 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v25, v9
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x float> %vs
+  %eneg = fptrunc <vscale x 1 x float> %n to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f32(<vscale x 1 x half> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v25
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x float> %splat
+  %eneg = fptrunc <vscale x 1 x float> %n to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopysign_exttrunc_vv_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, <vscale x 1 x double> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f16_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v25
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %e = fptrunc <vscale x 1 x double> %vs to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %e)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopysign_exttrunc_vf_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f16_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v25, v26
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %esplat = fptrunc <vscale x 1 x double> %splat to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %esplat)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopynsign_exttrunc_vv_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, <vscale x 1 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f16_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v25
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x double> %vs
+  %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64(<vscale x 1 x half> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f16_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v25, v26
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x double> %splat
+  %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x half>
+  %r = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> %vm, <vscale x 1 x half> %eneg)
+  ret <vscale x 1 x half> %r
+}
+
+declare <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
+
+define <vscale x 2 x half> @vfcopysign_vv_nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %vs)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @vfcopysign_vf_nxv2f16(<vscale x 2 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %splat)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @vfcopynsign_vv_nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 2 x half> %vs
+  %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %n)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @vfcopynsign_vf_nxv2f16(<vscale x 2 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer
+  %n = fneg <vscale x 2 x half> %splat
+  %r = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %vm, <vscale x 2 x half> %n)
+  ret <vscale x 2 x half> %r
+}
+
+declare <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
+
+define <vscale x 4 x half> @vfcopysign_vv_nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %vs)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @vfcopysign_vf_nxv4f16(<vscale x 4 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %splat)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @vfcopynsign_vv_nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 4 x half> %vs
+  %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %n)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @vfcopynsign_vf_nxv4f16(<vscale x 4 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer
+  %n = fneg <vscale x 4 x half> %splat
+  %r = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %vm, <vscale x 4 x half> %n)
+  ret <vscale x 4 x half> %r
+}
+
+declare <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
+
+define <vscale x 8 x half> @vfcopysign_vv_nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %vs)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopysign_vf_nxv8f16(<vscale x 8 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %splat)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopynsign_vv_nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x half> %vs
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %n)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopynsign_vf_nxv8f16(<vscale x 8 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x half> %splat
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %n)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopysign_exttrunc_vv_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, <vscale x 8 x float> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v12
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %e = fptrunc <vscale x 8 x float> %vs to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %e)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopysign_exttrunc_vf_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v28
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %esplat = fptrunc <vscale x 8 x float> %splat to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %esplat)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopynsign_exttrunc_vv_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, <vscale x 8 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v12
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x float> %vs
+  %eneg = fptrunc <vscale x 8 x float> %n to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f32(<vscale x 8 x half> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v28
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x float> %splat
+  %eneg = fptrunc <vscale x 8 x float> %n to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopysign_exttrunc_vv_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, <vscale x 8 x double> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v28
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %e = fptrunc <vscale x 8 x double> %vs to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %e)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopysign_exttrunc_vf_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v28
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %esplat = fptrunc <vscale x 8 x double> %splat to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %esplat)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopynsign_exttrunc_vv_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, <vscale x 8 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v28
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x double> %vs
+  %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64(<vscale x 8 x half> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f16_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.rod.f.f.w v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v28
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x double> %splat
+  %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x half>
+  %r = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %vm, <vscale x 8 x half> %eneg)
+  ret <vscale x 8 x half> %r
+}
+
+declare <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)
+
+define <vscale x 16 x half> @vfcopysign_vv_nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %vs)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @vfcopysign_vf_nxv16f16(<vscale x 16 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %splat)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @vfcopynsign_vv_nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 16 x half> %vs
+  %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %n)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @vfcopynsign_vf_nxv16f16(<vscale x 16 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer
+  %n = fneg <vscale x 16 x half> %splat
+  %r = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> %vm, <vscale x 16 x half> %n)
+  ret <vscale x 16 x half> %r
+}
+
+declare <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>)
+
+define <vscale x 32 x half> @vfcopysign_vv_nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %vs)
+  ret <vscale x 32 x half> %r
+}
+
+define <vscale x 32 x half> @vfcopysign_vf_nxv32f16(<vscale x 32 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %splat)
+  ret <vscale x 32 x half> %r
+}
+
+define <vscale x 32 x half> @vfcopynsign_vv_nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 32 x half> %vs
+  %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %n)
+  ret <vscale x 32 x half> %r
+}
+
+define <vscale x 32 x half> @vfcopynsign_vf_nxv32f16(<vscale x 32 x half> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 32 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+  %n = fneg <vscale x 32 x half> %splat
+  %r = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> %vm, <vscale x 32 x half> %n)
+  ret <vscale x 32 x half> %r
+}
+
+declare <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>)
+
+define <vscale x 1 x float> @vfcopysign_vv_nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %vs)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopysign_vf_nxv1f32(<vscale x 1 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %splat)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopynsign_vv_nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x float> %vs
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %n)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopynsign_vf_nxv1f32(<vscale x 1 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x float> %splat
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %n)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopysign_exttrunc_vv_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, <vscale x 1 x half> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %e = fpext <vscale x 1 x half> %vs to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %e)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopysign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %esplat = fpext <vscale x 1 x half> %splat to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %esplat)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopynsign_exttrunc_vv_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, <vscale x 1 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x half> %vs
+  %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16(<vscale x 1 x float> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x half> %splat
+  %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopysign_exttrunc_vv_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, <vscale x 1 x double> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v25, v9
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %e = fptrunc <vscale x 1 x double> %vs to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %e)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopysign_exttrunc_vf_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v25
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %esplat = fptrunc <vscale x 1 x double> %splat to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %esplat)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopynsign_exttrunc_vv_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, <vscale x 1 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v25, v9
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x double> %vs
+  %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64(<vscale x 1 x float> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v26, v25
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x double> %splat
+  %eneg = fptrunc <vscale x 1 x double> %n to <vscale x 1 x float>
+  %r = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> %vm, <vscale x 1 x float> %eneg)
+  ret <vscale x 1 x float> %r
+}
+
+declare <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
+
+define <vscale x 2 x float> @vfcopysign_vv_nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %vs)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @vfcopysign_vf_nxv2f32(<vscale x 2 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %splat)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @vfcopynsign_vv_nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 2 x float> %vs
+  %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %n)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @vfcopynsign_vf_nxv2f32(<vscale x 2 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+  %n = fneg <vscale x 2 x float> %splat
+  %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %vm, <vscale x 2 x float> %n)
+  ret <vscale x 2 x float> %r
+}
+
+declare <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+
+define <vscale x 4 x float> @vfcopysign_vv_nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %vs)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @vfcopysign_vf_nxv4f32(<vscale x 4 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %splat)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @vfcopynsign_vv_nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 4 x float> %vs
+  %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %n)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @vfcopynsign_vf_nxv4f32(<vscale x 4 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+  %n = fneg <vscale x 4 x float> %splat
+  %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %vm, <vscale x 4 x float> %n)
+  ret <vscale x 4 x float> %r
+}
+
+declare <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
+
+define <vscale x 8 x float> @vfcopysign_vv_nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %vs)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopysign_vf_nxv8f32(<vscale x 8 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %splat)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopynsign_vv_nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x float> %vs
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %n)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopynsign_vf_nxv8f32(<vscale x 8 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x float> %splat
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %n)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopysign_exttrunc_vv_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, <vscale x 8 x half> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v12
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %e = fpext <vscale x 8 x half> %vs to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %e)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopysign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v26
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %esplat = fpext <vscale x 8 x half> %splat to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %esplat)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, <vscale x 8 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v12
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x half> %vs
+  %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16(<vscale x 8 x float> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v26
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x half> %splat
+  %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopysign_exttrunc_vv_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, <vscale x 8 x double> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v28, v16
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %e = fptrunc <vscale x 8 x double> %vs to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %e)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopysign_exttrunc_vf_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v28, v16
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %esplat = fptrunc <vscale x 8 x double> %splat to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %esplat)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopynsign_exttrunc_vv_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, <vscale x 8 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v28, v16
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x double> %vs
+  %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64(<vscale x 8 x float> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f32_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfmv.v.f v16, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfncvt.f.f.w v28, v16
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x double> %splat
+  %eneg = fptrunc <vscale x 8 x double> %n to <vscale x 8 x float>
+  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %vm, <vscale x 8 x float> %eneg)
+  ret <vscale x 8 x float> %r
+}
+
+declare <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>)
+
+define <vscale x 16 x float> @vfcopysign_vv_nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %vs)
+  ret <vscale x 16 x float> %r
+}
+
+define <vscale x 16 x float> @vfcopysign_vf_nxv16f32(<vscale x 16 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %splat)
+  ret <vscale x 16 x float> %r
+}
+
+define <vscale x 16 x float> @vfcopynsign_vv_nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 16 x float> %vs
+  %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %n)
+  ret <vscale x 16 x float> %r
+}
+
+define <vscale x 16 x float> @vfcopynsign_vf_nxv16f32(<vscale x 16 x float> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+  %n = fneg <vscale x 16 x float> %splat
+  %r = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> %vm, <vscale x 16 x float> %n)
+  ret <vscale x 16 x float> %r
+}
+
+declare <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>)
+
+define <vscale x 1 x double> @vfcopysign_vv_nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %vs)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopysign_vf_nxv1f64(<vscale x 1 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %splat)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopynsign_vv_nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x double> %vs
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %n)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopynsign_vf_nxv1f64(<vscale x 1 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x double> %splat
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %n)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopysign_exttrunc_vv_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, <vscale x 1 x half> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f64_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %e = fpext <vscale x 1 x half> %vs to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %e)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v26
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %esplat = fpext <vscale x 1 x half> %splat to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %esplat)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, <vscale x 1 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f64_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x half> %vs
+  %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16(<vscale x 1 x double> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v26
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x half> %splat
+  %eneg = fpext <vscale x 1 x half> %n to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopysign_exttrunc_vv_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, <vscale x 1 x float> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %e = fpext <vscale x 1 x float> %vs to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %e)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopysign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %esplat = fpext <vscale x 1 x float> %splat to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %esplat)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopynsign_exttrunc_vv_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, <vscale x 1 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v25, v9
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v25
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 1 x float> %vs
+  %eneg = fpext <vscale x 1 x float> %n to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32(<vscale x 1 x double> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v25, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v26, v25
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+  %n = fneg <vscale x 1 x float> %splat
+  %eneg = fpext <vscale x 1 x float> %n to <vscale x 1 x double>
+  %r = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> %vm, <vscale x 1 x double> %eneg)
+  ret <vscale x 1 x double> %r
+}
+
+declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+
+define <vscale x 2 x double> @vfcopysign_vv_nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %vs)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @vfcopysign_vf_nxv2f64(<vscale x 2 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %splat)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @vfcopynsign_vv_nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 2 x double> %vs
+  %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %n)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @vfcopynsign_vf_nxv2f64(<vscale x 2 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+  %n = fneg <vscale x 2 x double> %splat
+  %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %vm, <vscale x 2 x double> %n)
+  ret <vscale x 2 x double> %r
+}
+
+declare <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
+
+define <vscale x 4 x double> @vfcopysign_vv_nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %vs)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @vfcopysign_vf_nxv4f64(<vscale x 4 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %splat)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @vfcopynsign_vv_nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 4 x double> %vs
+  %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %n)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @vfcopynsign_vf_nxv4f64(<vscale x 4 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+  %n = fneg <vscale x 4 x double> %splat
+  %r = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> %vm, <vscale x 4 x double> %n)
+  ret <vscale x 4 x double> %r
+}
+
+declare <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>)
+
+define <vscale x 8 x double> @vfcopysign_vv_nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %vs) {
+; CHECK-LABEL: vfcopysign_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %vs)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopysign_vf_nxv8f64(<vscale x 8 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopysign_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %splat)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopynsign_vv_nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %vs) {
+; CHECK-LABEL: vfcopynsign_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x double> %vs
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %n)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopynsign_vf_nxv8f64(<vscale x 8 x double> %vm, double %s) {
+; CHECK-LABEL: vfcopynsign_vf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x double> undef, double %s, i32 0
+  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x double> %splat
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %n)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopysign_exttrunc_vv_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, <vscale x 8 x half> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f64_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %e = fpext <vscale x 8 x half> %vs to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %e)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v26
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %esplat = fpext <vscale x 8 x half> %splat to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %esplat)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, <vscale x 8 x half> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f64_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v16
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x half> %vs
+  %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16(<vscale x 8 x double> %vm, half %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfmv.v.f v26, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v28, v26
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x half> undef, half %s, i32 0
+  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x half> %splat
+  %eneg = fpext <vscale x 8 x half> %n to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopysign_exttrunc_vv_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, <vscale x 8 x float> %vs) {
+; CHECK-LABEL: vfcopysign_exttrunc_vv_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v24, v16
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v24
+; CHECK-NEXT:    ret
+  %e = fpext <vscale x 8 x float> %vs to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %e)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopysign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
+; CHECK-LABEL: vfcopysign_exttrunc_vf_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %esplat = fpext <vscale x 8 x float> %splat to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %esplat)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopynsign_exttrunc_vv_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, <vscale x 8 x float> %vs) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vv_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfwcvt.f.f.v v24, v16
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v24
+; CHECK-NEXT:    ret
+  %n = fneg <vscale x 8 x float> %vs
+  %eneg = fpext <vscale x 8 x float> %n to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32(<vscale x 8 x double> %vm, float %s) {
+; CHECK-LABEL: vfcopynsign_exttrunc_vf_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfmv.v.f v28, fa0
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v28
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x float> undef, float %s, i32 0
+  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+  %n = fneg <vscale x 8 x float> %splat
+  %eneg = fpext <vscale x 8 x float> %n to <vscale x 8 x double>
+  %r = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> %vm, <vscale x 8 x double> %eneg)
+  ret <vscale x 8 x double> %r
+}
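
As a postscript to the new tests: every vfcopynsign_exttrunc case above pairs an fneg in the narrow element type with an fpext or fptrunc feeding llvm.copysign, and the CHECK lines show the negation being absorbed into a single vfsgnjn.vv alongside the widening or narrowing convert, with no separate negate instruction. For anyone wanting to poke at the codegen directly, below is a minimal standalone module of the same shape; the function name and the llc flags in the comment are illustrative guesses, not copied from the test file's RUN lines.

; Hedged reproducer: compile with llc flags along the lines of
;   -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v
; (the attribute spellings RVV sdnode tests used at the time).
declare <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)

define <vscale x 8 x float> @copynsign_widen(<vscale x 8 x float> %mag, <vscale x 8 x half> %sgn) {
  ; Negate in the narrow type, then widen. The expectation, per the
  ; vfcopynsign_exttrunc_vv_nxv8f32_nxv8f16 test above, is that the fneg
  ; is commuted past the extend and folded into vfsgnjn.vv.
  %n = fneg <vscale x 8 x half> %sgn
  %e = fpext <vscale x 8 x half> %n to <vscale x 8 x float>
  %r = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> %mag, <vscale x 8 x float> %e)
  ret <vscale x 8 x float> %r
}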