[llvm] b1b9c39 - [AArch64][SVE] Use SVE for VLS fcopysign for wide vectors

David Truby via llvm-commits <llvm-commits at lists.llvm.org>
Wed Aug 10 03:17:36 PDT 2022


Author: David Truby
Date: 2022-08-10T10:17:19Z
New Revision: b1b9c39629b5af002cc265d9464e3085b17b91cb

URL: https://github.com/llvm/llvm-project/commit/b1b9c39629b5af002cc265d9464e3085b17b91cb
DIFF: https://github.com/llvm/llvm-project/commit/b1b9c39629b5af002cc265d9464e3085b17b91cb.diff

LOG: [AArch64][SVE] Use SVE for VLS fcopysign for wide vectors

Currently, fcopysign for VLS vectors lowers through NEON even when the
vector is wider than a NEON register, causing poor codegen because the
vectors have to be split. This patch lowers these vectors through SVE
instead, giving much better codegen for wide VLS vectors.

Reviewed By: paulwalker-arm

Differential Revision: https://reviews.llvm.org/D128642
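
For illustration, this is the shape of IR that changes (a minimal sketch
mirroring test_copysign_v8f32_v8f32 from the added
sve-fixed-length-fcopysign.ll test; the function name here is invented).
Compiled with e.g. llc -aarch64-sve-vector-bits-min=256, as in the test
RUN lines, a 256-bit fixed-length copysign like this previously went
through NEON and was split, and now lowers to a single predicated SVE
and/and/orr sequence:

  ; 256-bit VLS copysign, wider than a 128-bit NEON register.
  define void @copysign_v8f32(ptr %ap, ptr %bp) #0 {
    %a = load <8 x float>, ptr %ap
    %b = load <8 x float>, ptr %bp
    %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b)
    store <8 x float> %r, ptr %ap
    ret void
  }
  declare <8 x float> @llvm.copysign.v8f32(<8 x float>, <8 x float>)
  attributes #0 = { "target-features"="+sve" }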

Added: 
    llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
    llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-fcopysign.ll
    llvm/test/CodeGen/AArch64/sve2-fcopysign.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index bbec4f9d2c484..7ba494033262f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -135,6 +135,11 @@ static cl::opt<bool> EnableShrinkLoadReplaceStoreWithStore(
     cl::desc("DAG combiner enable load/<replace bytes>/store with "
              "a narrower store"));
 
+static cl::opt<bool> EnableVectorFCopySignExtendRound(
+    "combiner-vector-fcopysign-extend-round", cl::Hidden, cl::init(false),
+    cl::desc(
+        "Enable merging extends and rounds into FCOPYSIGN on vector types"));
+
 namespace {
 
   class DAGCombiner {
@@ -15419,11 +15424,7 @@ static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) {
     if (N1Op0VT == MVT::f128)
       return false;
 
-    // Avoid mismatched vector operand types, for better instruction selection.
-    if (N1Op0VT.isVector())
-      return false;
-
-    return true;
+    return !N1Op0VT.isVector() || EnableVectorFCopySignExtendRound;
   }
   return false;
 }

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 853b8690c5837..bd0f09fc2f05b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -3597,7 +3597,26 @@ SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) {
 SDValue DAGTypeLegalizer::SplitVecOp_FCOPYSIGN(SDNode *N) {
   // The result (and the first input) has a legal vector type, but the second
   // input needs splitting.
-  return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());
+
+  SDLoc DL(N);
+
+  EVT LHSLoVT, LHSHiVT;
+  std::tie(LHSLoVT, LHSHiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+
+  if (!isTypeLegal(LHSLoVT) || !isTypeLegal(LHSHiVT))
+    return DAG.UnrollVectorOp(N, N->getValueType(0).getVectorNumElements());
+
+  SDValue LHSLo, LHSHi;
+  std::tie(LHSLo, LHSHi) =
+      DAG.SplitVector(N->getOperand(0), DL, LHSLoVT, LHSHiVT);
+
+  SDValue RHSLo, RHSHi;
+  std::tie(RHSLo, RHSHi) = DAG.SplitVector(N->getOperand(1), DL);
+
+  SDValue Lo = DAG.getNode(ISD::FCOPYSIGN, DL, LHSLoVT, LHSLo, RHSLo);
+  SDValue Hi = DAG.getNode(ISD::FCOPYSIGN, DL, LHSHiVT, LHSHi, RHSHi);
+
+  return DAG.getNode(ISD::CONCAT_VECTORS, DL, N->getValueType(0), Lo, Hi);
 }
 
 SDValue DAGTypeLegalizer::SplitVecOp_FP_TO_XINT_SAT(SDNode *N) {

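A minimal sketch of the case the new SplitVecOp_FCOPYSIGN path handles
(adapted from test_copysign_v4f32_v4f64 in sve-fcopysign.ll below; the
intrinsic is renamed here to the standard overloaded suffix). Under
--combiner-vector-fcopysign-extend-round the fptrunc is folded into the
FCOPYSIGN node, leaving a second operand of the illegal
<vscale x 4 x double> type that must be split. The old code
unconditionally unrolled here, which does not work for scalable vectors;
the new code splits the operation into two legal halves and concatenates
the results, falling back to unrolling only when the split halves are
not legal:

  define <vscale x 4 x float> @copysign_split(<vscale x 4 x float> %a, <vscale x 4 x double> %b) {
    %t = fptrunc <vscale x 4 x double> %b to <vscale x 4 x float>
    %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %t)
    ret <vscale x 4 x float> %r
  }
  declare <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
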
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d3bca4f61e2f7..305156391e1fe 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1646,6 +1646,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::FADD, VT, Custom);
   setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
   setOperationAction(ISD::FCEIL, VT, Custom);
+  setOperationAction(ISD::FCOPYSIGN, VT, Custom);
   setOperationAction(ISD::FDIV, VT, Custom);
   setOperationAction(ISD::FFLOOR, VT, Custom);
   setOperationAction(ISD::FMA, VT, Custom);
@@ -7774,18 +7775,22 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
   SDValue In2 = Op.getOperand(1);
   EVT SrcVT = In2.getValueType();
 
-  if (SrcVT.bitsLT(VT))
-    In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
-  else if (SrcVT.bitsGT(VT))
-    In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2,
-                      DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
+  if (!SrcVT.bitsEq(VT))
+    In2 = DAG.getFPExtendOrRound(In2, DL, VT);
 
   if (VT.isScalableVector())
     IntVT =
         getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
 
-  if (VT != In2.getValueType())
-    return SDValue();
+  if (VT.isFixedLengthVector() && useSVEForFixedLengthVectorVT(VT)) {
+    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
+
+    In1 = convertToScalableVector(DAG, ContainerVT, In1);
+    In2 = convertToScalableVector(DAG, ContainerVT, In2);
+
+    SDValue Res = DAG.getNode(ISD::FCOPYSIGN, DL, ContainerVT, In1, In2);
+    return convertFromScalableVector(DAG, VT, Res);
+  }
 
   auto BitCast = [this](EVT VT, SDValue Op, SelectionDAG &DAG) {
     if (VT.isScalableVector())
@@ -19516,18 +19521,13 @@ static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
 }
 
 static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
-                                      const AArch64Subtarget *Subtarget,
-                                      bool fixedSVEVectorVT) {
+                                      const AArch64Subtarget *Subtarget) {
   EVT VT = N->getValueType(0);
 
-  // Don't expand for SVE2
+  // Don't expand for NEON, SVE2 or SME
   if (!VT.isScalableVector() || Subtarget->hasSVE2() || Subtarget->hasSME())
     return SDValue();
 
-  // Don't expand for NEON
-  if (VT.isFixedLengthVector() && !fixedSVEVectorVT)
-    return SDValue();
-
   SDLoc DL(N);
 
   SDValue Mask = N->getOperand(0);
@@ -19699,8 +19699,7 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   case AArch64ISD::SUNPKLO:
     return performSunpkloCombine(N, DAG);
   case AArch64ISD::BSP:
-    return performBSPExpandForSVE(
-        N, DAG, Subtarget, useSVEForFixedLengthVectorVT(N->getValueType(0)));
+    return performBSPExpandForSVE(N, DAG, Subtarget);
   case ISD::INSERT_VECTOR_ELT:
     return performInsertVectorEltCombine(N, DCI);
   case ISD::EXTRACT_VECTOR_ELT:

diff --git a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
index fe6eaf8f7bcf6..aefcdbb46680b 100644
--- a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve -o - | FileCheck --check-prefixes=CHECK %s
-
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve -o - | FileCheck --check-prefixes=CHECK,CHECK-NO-EXTEND-ROUND %s
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK-EXTEND-ROUND %s
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
 ;============ v2f32
@@ -47,16 +47,32 @@ define <vscale x 4 x float> @test_copysign_v4f32_v4f32(<vscale x 4 x float> %a,
 
 ; SplitVecOp #1
 define <vscale x 4 x float> @test_copysign_v4f32_v4f64(<vscale x 4 x float> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f32_v4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
-; CHECK-NEXT:    fcvt z2.s, p0/m, z2.d
-; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
-; CHECK-NEXT:    ret
+; CHECK-NO-EXTEND-ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK-NO-EXTEND-ROUND:       // %bb.0:
+; CHECK-NO-EXTEND-ROUND-NEXT:    ptrue p0.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NO-EXTEND-ROUND-NEXT:    fcvt z2.s, p0/m, z2.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    uzp1 z1.s, z1.s, z2.s
+; CHECK-NO-EXTEND-ROUND-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NO-EXTEND-ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    ret
+;
+; CHECK-EXTEND-ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK-EXTEND-ROUND:       // %bb.0:
+; CHECK-EXTEND-ROUND-NEXT:    ptrue p0.d
+; CHECK-EXTEND-ROUND-NEXT:    uunpkhi z3.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT:    fcvt z2.s, p0/m, z2.d
+; CHECK-EXTEND-ROUND-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK-EXTEND-ROUND-NEXT:    uunpklo z0.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT:    and z2.s, z2.s, #0x80000000
+; CHECK-EXTEND-ROUND-NEXT:    and z3.s, z3.s, #0x7fffffff
+; CHECK-EXTEND-ROUND-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-EXTEND-ROUND-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-EXTEND-ROUND-NEXT:    orr z2.d, z3.d, z2.d
+; CHECK-EXTEND-ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-EXTEND-ROUND-NEXT:    uzp1 z0.s, z0.s, z2.s
+; CHECK-EXTEND-ROUND-NEXT:    ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x float>
   %r = call <vscale x 4 x float> @llvm.copysign.v4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %tmp0)
   ret <vscale x 4 x float> %r
@@ -161,16 +177,32 @@ define <vscale x 4 x half> @test_copysign_v4f16_v4f32(<vscale x 4 x half> %a, <v
 }
 
 define <vscale x 4 x half> @test_copysign_v4f16_v4f64(<vscale x 4 x half> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f16_v4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
-; CHECK-NEXT:    fcvt z2.h, p0/m, z2.d
-; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT:    and z1.h, z1.h, #0x8000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
-; CHECK-NEXT:    ret
+; CHECK-NO-EXTEND-ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK-NO-EXTEND-ROUND:       // %bb.0:
+; CHECK-NO-EXTEND-ROUND-NEXT:    ptrue p0.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NO-EXTEND-ROUND-NEXT:    fcvt z2.h, p0/m, z2.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    uzp1 z1.s, z1.s, z2.s
+; CHECK-NO-EXTEND-ROUND-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NO-EXTEND-ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    ret
+;
+; CHECK-EXTEND-ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK-EXTEND-ROUND:       // %bb.0:
+; CHECK-EXTEND-ROUND-NEXT:    ptrue p0.d
+; CHECK-EXTEND-ROUND-NEXT:    uunpkhi z3.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT:    fcvt z2.h, p0/m, z2.d
+; CHECK-EXTEND-ROUND-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK-EXTEND-ROUND-NEXT:    uunpklo z0.d, z0.s
+; CHECK-EXTEND-ROUND-NEXT:    and z2.h, z2.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT:    and z3.h, z3.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT:    orr z2.d, z3.d, z2.d
+; CHECK-EXTEND-ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-EXTEND-ROUND-NEXT:    uzp1 z0.s, z0.s, z2.s
+; CHECK-EXTEND-ROUND-NEXT:    ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x half>
   %r = call <vscale x 4 x half> @llvm.copysign.v4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %tmp0)
   ret <vscale x 4 x half> %r
@@ -192,16 +224,32 @@ define <vscale x 8 x half> @test_copysign_v8f16_v8f16(<vscale x 8 x half> %a, <v
 }
 
 define <vscale x 8 x half> @test_copysign_v8f16_v8f32(<vscale x 8 x half> %a, <vscale x 8 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v8f16_v8f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
-; CHECK-NEXT:    fcvt z2.h, p0/m, z2.s
-; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
-; CHECK-NEXT:    and z1.h, z1.h, #0x8000
-; CHECK-NEXT:    orr z0.d, z0.d, z1.d
-; CHECK-NEXT:    ret
+; CHECK-NO-EXTEND-ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK-NO-EXTEND-ROUND:       // %bb.0:
+; CHECK-NO-EXTEND-ROUND-NEXT:    ptrue p0.s
+; CHECK-NO-EXTEND-ROUND-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NO-EXTEND-ROUND-NEXT:    fcvt z2.h, p0/m, z2.s
+; CHECK-NO-EXTEND-ROUND-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK-NO-EXTEND-ROUND-NEXT:    uzp1 z1.h, z1.h, z2.h
+; CHECK-NO-EXTEND-ROUND-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NO-EXTEND-ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NO-EXTEND-ROUND-NEXT:    ret
+;
+; CHECK-EXTEND-ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK-EXTEND-ROUND:       // %bb.0:
+; CHECK-EXTEND-ROUND-NEXT:    ptrue p0.s
+; CHECK-EXTEND-ROUND-NEXT:    uunpkhi z3.s, z0.h
+; CHECK-EXTEND-ROUND-NEXT:    fcvt z2.h, p0/m, z2.s
+; CHECK-EXTEND-ROUND-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK-EXTEND-ROUND-NEXT:    uunpklo z0.s, z0.h
+; CHECK-EXTEND-ROUND-NEXT:    and z2.h, z2.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT:    and z3.h, z3.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-EXTEND-ROUND-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-EXTEND-ROUND-NEXT:    orr z2.d, z3.d, z2.d
+; CHECK-EXTEND-ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-EXTEND-ROUND-NEXT:    uzp1 z0.h, z0.h, z2.h
+; CHECK-EXTEND-ROUND-NEXT:    ret
   %tmp0 = fptrunc <vscale x 8 x float> %b to <vscale x 8 x half>
   %r = call <vscale x 8 x half> @llvm.copysign.v8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %tmp0)
   ret <vscale x 8 x half> %r

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
new file mode 100644
index 0000000000000..675afe876bb09
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fcopysign.ll
@@ -0,0 +1,558 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=256  --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=512  --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=2048 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;============ f16
+
+define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x half>, ptr %bp
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v8f16_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mvni v2.8h, #128, lsl #8
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x half>, ptr %ap
+  %b = load <8 x half>, ptr %bp
+  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b)
+  store <8 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v16f16_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <16 x half>, ptr %ap
+  %b = load <16 x half>, ptr %bp
+  %r = call <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b)
+  store <16 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v32f16_v32f16(ptr %ap, ptr %bp) #0 {
+; VBITS_GE_256-LABEL: test_copysign_v32f16_v32f16:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    mov x8, #16
+; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    and z0.h, z0.h, #0x7fff
+; VBITS_GE_256-NEXT:    and z1.h, z1.h, #0x7fff
+; VBITS_GE_256-NEXT:    and z2.h, z2.h, #0x8000
+; VBITS_GE_256-NEXT:    and z3.h, z3.h, #0x8000
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
+; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: test_copysign_v32f16_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z1.h, z1.h, #0x8000
+; VBITS_GE_512-NEXT:    and z0.h, z0.h, #0x7fff
+; VBITS_GE_512-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+  %a = load <32 x half>, ptr %ap
+  %b = load <32 x half>, ptr %bp
+  %r = call <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b)
+  store <32 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v64f16_v64f16(ptr %ap, ptr %bp) vscale_range(8,0) #0 {
+; CHECK-LABEL: test_copysign_v64f16_v64f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl64
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <64 x half>, ptr %ap
+  %b = load <64 x half>, ptr %bp
+  %r = call <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b)
+  store <64 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v128f16_v128f16(ptr %ap, ptr %bp) vscale_range(16,0) #0 {
+; CHECK-LABEL: test_copysign_v128f16_v128f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl128
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    and z1.h, z1.h, #0x8000
+; CHECK-NEXT:    and z0.h, z0.h, #0x7fff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <128 x half>, ptr %ap
+  %b = load <128 x half>, ptr %bp
+  %r = call <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b)
+  store <128 x half> %r, ptr %ap
+  ret void
+}
+
+;============ f32
+
+define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f32_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    mvni v2.2s, #128, lsl #24
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x float>, ptr %ap
+  %b = load <2 x float>, ptr %bp
+  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b)
+  store <2 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f32_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mvni v2.4s, #128, lsl #24
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x float>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
+  store <4 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v8f32_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x float>, ptr %ap
+  %b = load <8 x float>, ptr %bp
+  %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b)
+  store <8 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v16f32_v16f32(ptr %ap, ptr %bp) #0 {
+; VBITS_GE_256-LABEL: test_copysign_v16f32_v16f32:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    mov x8, #8
+; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    and z0.s, z0.s, #0x7fffffff
+; VBITS_GE_256-NEXT:    and z1.s, z1.s, #0x7fffffff
+; VBITS_GE_256-NEXT:    and z2.s, z2.s, #0x80000000
+; VBITS_GE_256-NEXT:    and z3.s, z3.s, #0x80000000
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
+; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: test_copysign_v16f32_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z1.s, z1.s, #0x80000000
+; VBITS_GE_512-NEXT:    and z0.s, z0.s, #0x7fffffff
+; VBITS_GE_512-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+  %a = load <16 x float>, ptr %ap
+  %b = load <16 x float>, ptr %bp
+  %r = call <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b)
+  store <16 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v32f32_v32f32(ptr %ap, ptr %bp) vscale_range(8,0) #0 {
+; CHECK-LABEL: test_copysign_v32f32_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl32
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <32 x float>, ptr %ap
+  %b = load <32 x float>, ptr %bp
+  %r = call <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b)
+  store <32 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0 {
+; CHECK-LABEL: test_copysign_v64f32_v64f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl64
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    and z1.s, z1.s, #0x80000000
+; CHECK-NEXT:    and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <64 x float>, ptr %ap
+  %b = load <64 x float>, ptr %bp
+  %r = call <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b)
+  store <64 x float> %r, ptr %ap
+  ret void
+}
+
+;============ f64
+
+define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f64_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q2, [x1]
+; CHECK-NEXT:    fneg v0.2d, v0.2d
+; CHECK-NEXT:    bsl v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x double>, ptr %ap
+  %b = load <2 x double>, ptr %bp
+  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b)
+  store <2 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f64_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x double>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b)
+  store <4 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f64_v8f64(ptr %ap, ptr %bp) #0 {
+; VBITS_GE_256-LABEL: test_copysign_v8f64_v8f64:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    mov x8, #4
+; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; VBITS_GE_256-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
+; VBITS_GE_256-NEXT:    and z2.d, z2.d, #0x8000000000000000
+; VBITS_GE_256-NEXT:    and z3.d, z3.d, #0x8000000000000000
+; VBITS_GE_256-NEXT:    orr z0.d, z0.d, z2.d
+; VBITS_GE_256-NEXT:    orr z1.d, z1.d, z3.d
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: test_copysign_v8f64_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; VBITS_GE_512-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; VBITS_GE_512-NEXT:    orr z0.d, z0.d, z1.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+  %a = load <8 x double>, ptr %ap
+  %b = load <8 x double>, ptr %bp
+  %r = call <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b)
+  store <8 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v16f64_v16f64(ptr %ap, ptr %bp) vscale_range(8,0) #0 {
+; CHECK-LABEL: test_copysign_v16f64_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl16
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <16 x double>, ptr %ap
+  %b = load <16 x double>, ptr %bp
+  %r = call <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b)
+  store <16 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v32f64_v32f64(ptr %ap, ptr %bp) vscale_range(16,0) #0 {
+; CHECK-LABEL: test_copysign_v32f64_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl32
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <32 x double>, ptr %ap
+  %b = load <32 x double>, ptr %bp
+  %r = call <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b)
+  store <32 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v2f32
+
+define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f32_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    mvni v2.2s, #128, lsl #24
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x float>, ptr %ap
+  %b = load <2 x double>, ptr %bp
+  %tmp0 = fptrunc <2 x double> %b to <2 x float>
+  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %tmp0)
+  store <2 x float> %r, ptr %ap
+  ret void
+}
+
+;============ v4f32
+
+; SplitVecOp #1
+define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f32_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mvni v2.4s, #128, lsl #24
+; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x float>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %tmp0 = fptrunc <4 x double> %b to <4 x float>
+  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0)
+  store <4 x float> %r, ptr %ap
+  ret void
+}
+
+;============ v2f64
+
+define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f64_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ldr q2, [x0]
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fneg v0.2d, v0.2d
+; CHECK-NEXT:    bsl v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x double>, ptr %ap
+  %b = load < 2 x float>, ptr %bp
+  %tmp0 = fpext <2 x float> %b to <2 x double>
+  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %tmp0)
+  store <2 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v4f64
+
+; SplitVecRes mismatched
+define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_NO_EXTEND_ROUND:       // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1w { z1.d }, p0/z, [x1]
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK_NO_EXTEND_ROUND-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK_NO_EXTEND_ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK_NO_EXTEND_ROUND-NEXT:    ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_EXTEND_ROUND:       // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
+; CHECK_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK_EXTEND_ROUND-NEXT:    ldr q1, [x1]
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z1.d, z1.s
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
+; CHECK_EXTEND_ROUND-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK_EXTEND_ROUND-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK_EXTEND_ROUND-NEXT:    orr z0.d, z0.d, z1.d
+; CHECK_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK_EXTEND_ROUND-NEXT:    ret
+  %a = load <4 x double>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %tmp0 = fpext <4 x float> %b to <4 x double>
+  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %tmp0)
+  store <4 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v4f16
+
+define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %tmp0 = fptrunc <4 x float> %b to <4 x half>
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %tmp0 = fptrunc <4 x double> %b to <4 x half>
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+declare <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) #0
+
+;============ v8f16
+
+
+define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v8f16_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mvni v2.8h, #128, lsl #8
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x half>, ptr %ap
+  %b = load <8 x float>, ptr %bp
+  %tmp0 = fptrunc <8 x float> %b to <8 x half>
+  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %tmp0)
+  store <8 x half> %r, ptr %ap
+  ret void
+}
+
+declare <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) #0
+declare <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) #0
+declare <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b) #0
+declare <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b) #0
+declare <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b) #0
+
+declare <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) #0
+declare <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) #0
+declare <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) #0
+declare <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b) #0
+declare <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b) #0
+declare <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b) #0
+
+declare <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) #0
+declare <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) #0
+declare <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b) #0
+declare <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b) #0
+declare <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b) #0
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
index 65f8aa989e31c..1e6114f78b422 100644
--- a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 -o - | FileCheck --check-prefixes=CHECK %s
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 -o - | FileCheck --check-prefixes=CHECK,CHECK_NO_EXTEND_ROUND %s
+; RUN: llc < %s -mtriple aarch64-eabi -mattr=+sve2 --combiner-vector-fcopysign-extend-round -o - | FileCheck --check-prefixes=CHECK,CHECK_EXTEND_ROUND %s
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 
@@ -44,15 +45,28 @@ define <vscale x 4 x float> @test_copysign_v4f32_v4f32(<vscale x 4 x float> %a,
 
 ; SplitVecOp #1
 define <vscale x 4 x float> @test_copysign_v4f32_v4f64(<vscale x 4 x float> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f32_v4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    fcvt z2.s, p0/m, z2.d
-; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT:    mov z2.s, #0x7fffffff
-; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT:    ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK_NO_EXTEND_ROUND:       // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z2.s, p0/m, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    uzp1 z1.s, z1.s, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    mov z2.s, #0x7fffffff
+; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f32_v4f64:
+; CHECK_EXTEND_ROUND:       // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.d
+; CHECK_EXTEND_ROUND-NEXT:    mov z3.s, #0x7fffffff
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z2.s, p0/m, z2.d
+; CHECK_EXTEND_ROUND-NEXT:    uunpkhi z4.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z0.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    bsl z4.d, z4.d, z2.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT:    uzp1 z0.s, z0.s, z4.s
+; CHECK_EXTEND_ROUND-NEXT:    ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x float>
   %r = call <vscale x 4 x float> @llvm.copysign.v4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %tmp0)
   ret <vscale x 4 x float> %r
@@ -91,17 +105,29 @@ declare <vscale x 2 x double> @llvm.copysign.v2f64(<vscale x 2 x double> %a, <vs
 
 ; SplitVecRes mismatched
 define <vscale x 4 x double> @test_copysign_v4f64_v4f32(<vscale x 4 x double> %a, <vscale x 4 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f64_v4f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    uunpkhi z3.d, z2.s
-; CHECK-NEXT:    uunpklo z2.d, z2.s
-; CHECK-NEXT:    fcvt z3.d, p0/m, z3.s
-; CHECK-NEXT:    fcvt z2.d, p0/m, z2.s
-; CHECK-NEXT:    mov z4.d, #0x7fffffffffffffff
-; CHECK-NEXT:    bsl z0.d, z0.d, z2.d, z4.d
-; CHECK-NEXT:    bsl z1.d, z1.d, z3.d, z4.d
-; CHECK-NEXT:    ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_NO_EXTEND_ROUND:       // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    uunpkhi z3.d, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    uunpklo z2.d, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z3.d, p0/m, z3.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z2.d, p0/m, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    mov z4.d, #0x7fffffffffffffff
+; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z2.d, z4.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z1.d, z1.d, z3.d, z4.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_EXTEND_ROUND:       // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.d
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z3.d, z2.s
+; CHECK_EXTEND_ROUND-NEXT:    uunpkhi z2.d, z2.s
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z3.d, p0/m, z3.s
+; CHECK_EXTEND_ROUND-NEXT:    mov z4.d, #0x7fffffffffffffff
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z2.d, p0/m, z2.s
+; CHECK_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z3.d, z4.d
+; CHECK_EXTEND_ROUND-NEXT:    bsl z1.d, z1.d, z2.d, z4.d
+; CHECK_EXTEND_ROUND-NEXT:    ret
   %tmp0 = fpext <vscale x 4 x float> %b to <vscale x 4 x double>
   %r = call <vscale x 4 x double> @llvm.copysign.v4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %tmp0)
   ret <vscale x 4 x double> %r
@@ -147,15 +173,28 @@ define <vscale x 4 x half> @test_copysign_v4f16_v4f32(<vscale x 4 x half> %a, <v
 }
 
 define <vscale x 4 x half> @test_copysign_v4f16_v4f64(<vscale x 4 x half> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: test_copysign_v4f16_v4f64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    fcvt z2.h, p0/m, z2.d
-; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
-; CHECK-NEXT:    uzp1 z1.s, z1.s, z2.s
-; CHECK-NEXT:    mov z2.h, #32767 // =0x7fff
-; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT:    ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK_NO_EXTEND_ROUND:       // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z2.h, p0/m, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    uzp1 z1.s, z1.s, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    mov z2.h, #32767 // =0x7fff
+; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f16_v4f64:
+; CHECK_EXTEND_ROUND:       // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.d
+; CHECK_EXTEND_ROUND-NEXT:    mov z3.h, #32767 // =0x7fff
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z2.h, p0/m, z2.d
+; CHECK_EXTEND_ROUND-NEXT:    uunpkhi z4.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z0.d, z0.s
+; CHECK_EXTEND_ROUND-NEXT:    bsl z4.d, z4.d, z2.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT:    uzp1 z0.s, z0.s, z4.s
+; CHECK_EXTEND_ROUND-NEXT:    ret
   %tmp0 = fptrunc <vscale x 4 x double> %b to <vscale x 4 x half>
   %r = call <vscale x 4 x half> @llvm.copysign.v4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %tmp0)
   ret <vscale x 4 x half> %r
@@ -176,15 +215,28 @@ define <vscale x 8 x half> @test_copysign_v8f16_v8f16(<vscale x 8 x half> %a, <v
 }
 
 define <vscale x 8 x half> @test_copysign_v8f16_v8f32(<vscale x 8 x half> %a, <vscale x 8 x float> %b) #0 {
-; CHECK-LABEL: test_copysign_v8f16_v8f32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.s
-; CHECK-NEXT:    fcvt z2.h, p0/m, z2.s
-; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
-; CHECK-NEXT:    uzp1 z1.h, z1.h, z2.h
-; CHECK-NEXT:    mov z2.h, #32767 // =0x7fff
-; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
-; CHECK-NEXT:    ret
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK_NO_EXTEND_ROUND:       // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z2.h, p0/m, z2.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    uzp1 z1.h, z1.h, z2.h
+; CHECK_NO_EXTEND_ROUND-NEXT:    mov z2.h, #32767 // =0x7fff
+; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v8f16_v8f32:
+; CHECK_EXTEND_ROUND:       // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.s
+; CHECK_EXTEND_ROUND-NEXT:    mov z3.h, #32767 // =0x7fff
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z2.h, p0/m, z2.s
+; CHECK_EXTEND_ROUND-NEXT:    uunpkhi z4.s, z0.h
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z0.s, z0.h
+; CHECK_EXTEND_ROUND-NEXT:    bsl z4.d, z4.d, z2.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z3.d
+; CHECK_EXTEND_ROUND-NEXT:    uzp1 z0.h, z0.h, z4.h
+; CHECK_EXTEND_ROUND-NEXT:    ret
   %tmp0 = fptrunc <vscale x 8 x float> %b to <vscale x 8 x half>
   %r = call <vscale x 8 x half> @llvm.copysign.v8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %tmp0)
   ret <vscale x 8 x half> %r

diff --git a/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
new file mode 100644
index 0000000000000..82ea8514f24dd
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-fixed-length-fcopysign.ll
@@ -0,0 +1,536 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_NO_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=256  --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256,CHECK_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=512  --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND
+; RUN: llc -aarch64-sve-vector-bits-min=2048 --combiner-vector-fcopysign-extend-round < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,CHECK_EXTEND_ROUND
+
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;============ f16
+
+define void @test_copysign_v4f16_v4f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x half>, ptr %bp
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f16_v8f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v8f16_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mvni v2.8h, #128, lsl #8
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x half>, ptr %ap
+  %b = load <8 x half>, ptr %bp
+  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b)
+  store <8 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v16f16_v16f16(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v16f16_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <16 x half>, ptr %ap
+  %b = load <16 x half>, ptr %bp
+  %r = call <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b)
+  store <16 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v32f16_v32f16(ptr %ap, ptr %bp) #0 {
+; VBITS_GE_256-LABEL: test_copysign_v32f16_v32f16:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    mov x8, #16
+; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_256-NEXT:    mov z4.h, #32767 // =0x7fff
+; VBITS_GE_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    bsl z0.d, z0.d, z2.d, z4.d
+; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z3.d, z4.d
+; VBITS_GE_256-NEXT:    st1h { z0.h }, p0, [x0, x8, lsl #1]
+; VBITS_GE_256-NEXT:    st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: test_copysign_v32f16_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    mov z2.h, #32767 // =0x7fff
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+  %a = load <32 x half>, ptr %ap
+  %b = load <32 x half>, ptr %bp
+  %r = call <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b)
+  store <32 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v64f16_v64f16(ptr %ap, ptr %bp) vscale_range(8,0) #0 {
+; CHECK-LABEL: test_copysign_v64f16_v64f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl64
+; CHECK-NEXT:    mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <64 x half>, ptr %ap
+  %b = load <64 x half>, ptr %bp
+  %r = call <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b)
+  store <64 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v128f16_v128f16(ptr %ap, ptr %bp) vscale_range(16,0) #0 {
+; CHECK-LABEL: test_copysign_v128f16_v128f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl128
+; CHECK-NEXT:    mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <128 x half>, ptr %ap
+  %b = load <128 x half>, ptr %bp
+  %r = call <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b)
+  store <128 x half> %r, ptr %ap
+  ret void
+}
+
+;============ f32
+
+define void @test_copysign_v2f32_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f32_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    mvni v2.2s, #128, lsl #24
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x float>, ptr %ap
+  %b = load <2 x float>, ptr %bp
+  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b)
+  store <2 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f32_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f32_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    mvni v2.4s, #128, lsl #24
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x float>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
+  store <4 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f32_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v8f32_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    mov z2.s, #0x7fffffff
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x float>, ptr %ap
+  %b = load <8 x float>, ptr %bp
+  %r = call <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b)
+  store <8 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v16f32_v16f32(ptr %ap, ptr %bp) #0 {
+; VBITS_GE_256-LABEL: test_copysign_v16f32_v16f32:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    mov x8, #8
+; VBITS_GE_256-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_256-NEXT:    mov z4.s, #0x7fffffff
+; VBITS_GE_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z1.s }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1w { z2.s }, p0/z, [x1, x8, lsl #2]
+; VBITS_GE_256-NEXT:    ld1w { z3.s }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    bsl z0.d, z0.d, z2.d, z4.d
+; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z3.d, z4.d
+; VBITS_GE_256-NEXT:    st1w { z0.s }, p0, [x0, x8, lsl #2]
+; VBITS_GE_256-NEXT:    st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: test_copysign_v16f32_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    mov z2.s, #0x7fffffff
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+  %a = load <16 x float>, ptr %ap
+  %b = load <16 x float>, ptr %bp
+  %r = call <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b)
+  store <16 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v32f32_v32f32(ptr %ap, ptr %bp) vscale_range(8,0) #0 {
+; CHECK-LABEL: test_copysign_v32f32_v32f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl32
+; CHECK-NEXT:    mov z2.s, #0x7fffffff
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <32 x float>, ptr %ap
+  %b = load <32 x float>, ptr %bp
+  %r = call <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b)
+  store <32 x float> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v64f32_v64f32(ptr %ap, ptr %bp) vscale_range(16,0) #0 {
+; CHECK-LABEL: test_copysign_v64f32_v64f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl64
+; CHECK-NEXT:    mov z2.s, #0x7fffffff
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <64 x float>, ptr %ap
+  %b = load <64 x float>, ptr %bp
+  %r = call <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b)
+  store <64 x float> %r, ptr %ap
+  ret void
+}
+
+;============ f64
+
+define void @test_copysign_v2f64_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f64_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    ldr q2, [x1]
+; CHECK-NEXT:    fneg v0.2d, v0.2d
+; CHECK-NEXT:    bsl v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x double>, ptr %ap
+  %b = load <2 x double>, ptr %bp
+  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b)
+  store <2 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f64_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f64_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    mov z2.d, #0x7fffffffffffffff
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x double>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b)
+  store <4 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v8f64_v8f64(ptr %ap, ptr %bp) #0 {
+; VBITS_GE_256-LABEL: test_copysign_v8f64_v8f64:
+; VBITS_GE_256:       // %bb.0:
+; VBITS_GE_256-NEXT:    mov x8, #4
+; VBITS_GE_256-NEXT:    ptrue p0.d, vl4
+; VBITS_GE_256-NEXT:    mov z4.d, #0x7fffffffffffffff
+; VBITS_GE_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z1.d }, p0/z, [x0]
+; VBITS_GE_256-NEXT:    ld1d { z2.d }, p0/z, [x1, x8, lsl #3]
+; VBITS_GE_256-NEXT:    ld1d { z3.d }, p0/z, [x1]
+; VBITS_GE_256-NEXT:    bsl z0.d, z0.d, z2.d, z4.d
+; VBITS_GE_256-NEXT:    bsl z1.d, z1.d, z3.d, z4.d
+; VBITS_GE_256-NEXT:    st1d { z0.d }, p0, [x0, x8, lsl #3]
+; VBITS_GE_256-NEXT:    st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: test_copysign_v8f64_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    mov z2.d, #0x7fffffffffffffff
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; VBITS_GE_512-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x0]
+; VBITS_GE_512-NEXT:    ret
+  %a = load <8 x double>, ptr %ap
+  %b = load <8 x double>, ptr %bp
+  %r = call <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b)
+  store <8 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v16f64_v16f64(ptr %ap, ptr %bp) vscale_range(8,0) #0 {
+; CHECK-LABEL: test_copysign_v16f64_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl16
+; CHECK-NEXT:    mov z2.d, #0x7fffffffffffffff
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <16 x double>, ptr %ap
+  %b = load <16 x double>, ptr %bp
+  %r = call <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b)
+  store <16 x double> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v32f64_v32f64(ptr %ap, ptr %bp) vscale_range(16,0) #0 {
+; CHECK-LABEL: test_copysign_v32f64_v32f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl32
+; CHECK-NEXT:    mov z2.d, #0x7fffffffffffffff
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <32 x double>, ptr %ap
+  %b = load <32 x double>, ptr %bp
+  %r = call <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b)
+  store <32 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v2f32
+
+define void @test_copysign_v2f32_v2f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f32_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    mvni v2.2s, #128, lsl #24
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x float>, ptr %ap
+  %b = load <2 x double>, ptr %bp
+  %tmp0 = fptrunc <2 x double> %b to <2 x float>
+  %r = call <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %tmp0)
+  store <2 x float> %r, ptr %ap
+  ret void
+}
+
+;============ v4f32
+
+; SplitVecOp #1
+define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f32_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mvni v2.4s, #128, lsl #24
+; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x float>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %tmp0 = fptrunc <4 x double> %b to <4 x float>
+  %r = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %tmp0)
+  store <4 x float> %r, ptr %ap
+  ret void
+}
+
+;============ v2f64
+
+define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v2f64_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    ldr q2, [x0]
+; CHECK-NEXT:    fcvtl v1.2d, v1.2s
+; CHECK-NEXT:    fneg v0.2d, v0.2d
+; CHECK-NEXT:    bsl v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <2 x double>, ptr %ap
+  %b = load <2 x float>, ptr %bp
+  %tmp0 = fpext <2 x float> %b to <2 x double>
+  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %tmp0)
+  store <2 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v4f64
+
+; SplitVecRes mismatched
+define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK_NO_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_NO_EXTEND_ROUND:       // %bb.0:
+; CHECK_NO_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
+; CHECK_NO_EXTEND_ROUND-NEXT:    mov z2.d, #0x7fffffffffffffff
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK_NO_EXTEND_ROUND-NEXT:    ld1w { z1.d }, p0/z, [x1]
+; CHECK_NO_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
+; CHECK_NO_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_NO_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK_NO_EXTEND_ROUND-NEXT:    ret
+;
+; CHECK_EXTEND_ROUND-LABEL: test_copysign_v4f64_v4f32:
+; CHECK_EXTEND_ROUND:       // %bb.0:
+; CHECK_EXTEND_ROUND-NEXT:    ptrue p0.d, vl4
+; CHECK_EXTEND_ROUND-NEXT:    mov z2.d, #0x7fffffffffffffff
+; CHECK_EXTEND_ROUND-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK_EXTEND_ROUND-NEXT:    ldr q1, [x1]
+; CHECK_EXTEND_ROUND-NEXT:    uunpklo z1.d, z1.s
+; CHECK_EXTEND_ROUND-NEXT:    fcvt z1.d, p0/m, z1.s
+; CHECK_EXTEND_ROUND-NEXT:    bsl z0.d, z0.d, z1.d, z2.d
+; CHECK_EXTEND_ROUND-NEXT:    st1d { z0.d }, p0, [x0]
+; CHECK_EXTEND_ROUND-NEXT:    ret
+  %a = load <4 x double>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %tmp0 = fpext <4 x float> %b to <4 x double>
+  %r = call <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %tmp0)
+  store <4 x double> %r, ptr %ap
+  ret void
+}
+
+;============ v4f16
+
+define void @test_copysign_v4f16_v4f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    ldr d1, [x0]
+; CHECK-NEXT:    fcvtn v0.4h, v0.4s
+; CHECK-NEXT:    bit v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x float>, ptr %bp
+  %tmp0 = fptrunc <4 x float> %b to <4 x half>
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+define void @test_copysign_v4f16_v4f64(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v4f16_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mvni v2.4h, #128, lsl #8
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK-NEXT:    uzp1 z1.s, z1.s, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <4 x half>, ptr %ap
+  %b = load <4 x double>, ptr %bp
+  %tmp0 = fptrunc <4 x double> %b to <4 x half>
+  %r = call <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %tmp0)
+  store <4 x half> %r, ptr %ap
+  ret void
+}
+
+declare <4 x half> @llvm.copysign.v4f16(<4 x half> %a, <4 x half> %b) #0
+
+;============ v8f16
+
+define void @test_copysign_v8f16_v8f32(ptr %ap, ptr %bp) vscale_range(2,0) #0 {
+; CHECK-LABEL: test_copysign_v8f16_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    mvni v2.8h, #128, lsl #8
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK-NEXT:    uzp1 z1.h, z1.h, z1.h
+; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
+  %a = load <8 x half>, ptr %ap
+  %b = load <8 x float>, ptr %bp
+  %tmp0 = fptrunc <8 x float> %b to <8 x half>
+  %r = call <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %tmp0)
+  store <8 x half> %r, ptr %ap
+  ret void
+}
+
+declare <8 x half> @llvm.copysign.v8f16(<8 x half> %a, <8 x half> %b) #0
+declare <16 x half> @llvm.copysign.v16f16(<16 x half> %a, <16 x half> %b) #0
+declare <32 x half> @llvm.copysign.v32f16(<32 x half> %a, <32 x half> %b) #0
+declare <64 x half> @llvm.copysign.v64f16(<64 x half> %a, <64 x half> %b) #0
+declare <128 x half> @llvm.copysign.v128f16(<128 x half> %a, <128 x half> %b) #0
+
+declare <2 x float> @llvm.copysign.v2f32(<2 x float> %a, <2 x float> %b) #0
+declare <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b) #0
+declare <8 x float> @llvm.copysign.v8f32(<8 x float> %a, <8 x float> %b) #0
+declare <16 x float> @llvm.copysign.v16f32(<16 x float> %a, <16 x float> %b) #0
+declare <32 x float> @llvm.copysign.v32f32(<32 x float> %a, <32 x float> %b) #0
+declare <64 x float> @llvm.copysign.v64f32(<64 x float> %a, <64 x float> %b) #0
+
+declare <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b) #0
+declare <4 x double> @llvm.copysign.v4f64(<4 x double> %a, <4 x double> %b) #0
+declare <8 x double> @llvm.copysign.v8f64(<8 x double> %a, <8 x double> %b) #0
+declare <16 x double> @llvm.copysign.v16f64(<16 x double> %a, <16 x double> %b) #0
+declare <32 x double> @llvm.copysign.v32f64(<32 x double> %a, <32 x double> %b) #0
+
+attributes #0 = { "target-features"="+sve2" }

More information about the llvm-commits mailing list