[llvm] [LLVM][SVE] Add isel for scalable vector bfloat copysign operations. (PR #130098)
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 6 05:10:08 PST 2025
https://github.com/paulwalker-arm created https://github.com/llvm/llvm-project/pull/130098
None
>From 4adf4677451bab80e57b0343c2f38f93c32efbb2 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Thu, 6 Mar 2025 11:45:29 +0000
Subject: [PATCH] [LLVM][SVE] Add isel for scalable vector bfloat copysign
operations.
---
.../Target/AArch64/AArch64ISelLowering.cpp | 3 +-
llvm/test/CodeGen/AArch64/sve-fcopysign.ll | 154 +++++++++++++++
...e-streaming-mode-fixed-length-fcopysign.ll | 37 +---
llvm/test/CodeGen/AArch64/sve2-fcopysign.ll | 178 ++++++++++++++++++
4 files changed, 343 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e7d141d22c7c7..cc225a84734ab 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1700,6 +1700,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BITCAST, VT, Custom);
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
setOperationAction(ISD::FABS, VT, Legal);
+ setOperationAction(ISD::FCOPYSIGN, VT, Custom);
setOperationAction(ISD::FNEG, VT, Legal);
setOperationAction(ISD::FP_EXTEND, VT, Custom);
setOperationAction(ISD::FP_ROUND, VT, Custom);
@@ -10690,7 +10691,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
// a SVE FCOPYSIGN.
if (!VT.isVector() && !Subtarget->isNeonAvailable() &&
Subtarget->isSVEorStreamingSVEAvailable()) {
- if (VT != MVT::f16 && VT != MVT::f32 && VT != MVT::f64)
+ if (VT != MVT::f16 && VT != MVT::f32 && VT != MVT::f64 && VT != MVT::bf16)
return SDValue();
EVT SVT = getPackedSVEVectorVT(VT);
diff --git a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
index 6f11bf963c1d1..39535a11e4d12 100644
--- a/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcopysign.ll
@@ -185,6 +185,19 @@ define <vscale x 2 x float> @copysign_nxv2f32_nxv2f64(<vscale x 2 x float> %a, <
ret <vscale x 2 x float> %r
}
+define <vscale x 2 x float> @copysign_nxv2f32_nxv2bf16(<vscale x 2 x float> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv2f32_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT: and z1.s, z1.s, #0x80000000
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %tmp0 = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x float>
+ %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %tmp0)
+ ret <vscale x 2 x float> %r
+}
+
;
; llvm.copysign.nxv4f32
;
@@ -230,6 +243,19 @@ define <vscale x 4 x float> @copysign_nxv4f32_nxv4f64(<vscale x 4 x float> %a, <
ret <vscale x 4 x float> %r
}
+define <vscale x 4 x float> @copysign_nxv4f32_nxv4bf16(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv4f32_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: and z0.s, z0.s, #0x7fffffff
+; CHECK-NEXT: and z1.s, z1.s, #0x80000000
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %tmp0 = fpext <vscale x 4 x bfloat> %b to <vscale x 4 x float>
+ %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %tmp0)
+ ret <vscale x 4 x float> %r
+}
+
;
; llvm.copysign.nxv2f64
;
@@ -273,9 +299,137 @@ define <vscale x 2 x double> @copysign_nxv2f64_nxv2f64(<vscale x 2 x double> %a,
ret <vscale x 2 x double> %r
}
+define <vscale x 2 x double> @copysign_nxv2f64_nxv2bf16(<vscale x 2 x double> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv2f64_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcvt z1.d, p0/m, z1.s
+; CHECK-NEXT: and z1.d, z1.d, #0x8000000000000000
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %b.ext = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x double>
+ %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b.ext)
+ ret <vscale x 2 x double> %r
+}
+
+;
+; llvm.copysign.nxv2bf16
+;
+
+define <vscale x 2 x bfloat> @copysign_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv2bf16_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
+ ret <vscale x 2 x bfloat> %r
+}
+
+define <vscale x 2 x bfloat> @copysign_nxv2bf16_nxv2f32(<vscale x 2 x bfloat> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: copysign_nxv2bf16_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %tmp0 = fptrunc <vscale x 2 x float> %b to <vscale x 2 x bfloat>
+ %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %tmp0)
+ ret <vscale x 2 x bfloat> %r
+}
+
+; TODO: Cannot downconvert from double to bfloat
+;define <vscale x 2 x bfloat> @copysign_nxv2bf16_nxv2f64(<vscale x 2 x bfloat> %a, <vscale x 2 x double> %b) {
+; %tmp0 = fptrunc <vscale x 2 x double> %b to <vscale x 2 x bfloat>
+; %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %tmp0)
+; ret <vscale x 2 x bfloat> %r
+;}
+
+;
+; llvm.copysign.nxv4bf16
+;
+
+define <vscale x 4 x bfloat> @copysign_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv4bf16_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
+ ret <vscale x 4 x bfloat> %r
+}
+
+define <vscale x 4 x bfloat> @copysign_nxv4bf16_nxv4f32(<vscale x 4 x bfloat> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: copysign_nxv4bf16_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %b.trunc = fptrunc <vscale x 4 x float> %b to <vscale x 4 x bfloat>
+ %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b.trunc)
+ ret <vscale x 4 x bfloat> %r
+}
+
+; TODO: Cannot downconvert from double to bfloat
+;define <vscale x 4 x bfloat> @copysign_nxv4bf16_nxv4f64(<vscale x 4 x bfloat> %a, <vscale x 4 x double> %b) {
+; %b.trunc = fptrunc <vscale x 4 x double> %b to <vscale x 4 x bfloat>
+; %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b.trunc)
+; ret <vscale x 4 x bfloat> %r
+;}
+
+;
+; llvm.copysign.nxv8bf16
+;
+
+define <vscale x 8 x bfloat> @copysign_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv8bf16_nxv8bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+ ret <vscale x 8 x bfloat> %r
+}
+
+define <vscale x 8 x bfloat> @copysign_nxv8bf16_nxv8f32(<vscale x 8 x bfloat> %a, <vscale x 8 x float> %b) {
+; CHECK-LABEL: copysign_nxv8bf16_nxv8f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: and z0.h, z0.h, #0x7fff
+; CHECK-NEXT: bfcvt z2.h, p0/m, z2.s
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT: and z1.h, z1.h, #0x8000
+; CHECK-NEXT: orr z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %b.trunc = fptrunc <vscale x 8 x float> %b to <vscale x 8 x bfloat>
+ %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b.trunc)
+ ret <vscale x 8 x bfloat> %r
+}
+
+; TODO: Cannot downconvert from double to bfloat
+;define <vscale x 8 x bfloat> @copysign_nxv8bf16_nxv8f64(<vscale x 8 x bfloat> %a, <vscale x 8 x double> %b) {
+; %b.trunc = fptrunc <vscale x 8 x double> %b to <vscale x 8 x bfloat>
+; %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b.trunc)
+; ret <vscale x 8 x bfloat> %r
+;}
+
declare <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
declare <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
declare <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
declare <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
declare <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
+declare <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
+declare <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
+declare <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
index 79921e25caf53..381c67c6d749e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -55,40 +55,21 @@ define void @test_copysign_f16(ptr %ap, ptr %bp) {
define void @test_copysign_bf16(ptr %ap, ptr %bp) {
; SVE-LABEL: test_copysign_bf16:
; SVE: // %bb.0:
-; SVE-NEXT: sub sp, sp, #16
-; SVE-NEXT: .cfi_def_cfa_offset 16
-; SVE-NEXT: ldr h0, [x0]
-; SVE-NEXT: ldr h1, [x1]
-; SVE-NEXT: fmov w8, s0
-; SVE-NEXT: str h1, [sp, #12]
-; SVE-NEXT: ldrb w9, [sp, #13]
-; SVE-NEXT: and w8, w8, #0x7fff
-; SVE-NEXT: tst w9, #0x80
-; SVE-NEXT: fmov s0, w8
-; SVE-NEXT: eor w8, w8, #0x8000
-; SVE-NEXT: fmov s1, w8
-; SVE-NEXT: fcsel h0, h1, h0, ne
+; SVE-NEXT: ldr h0, [x1]
+; SVE-NEXT: ldr h1, [x0]
+; SVE-NEXT: and z0.h, z0.h, #0x8000
+; SVE-NEXT: and z1.h, z1.h, #0x7fff
+; SVE-NEXT: orr z0.d, z1.d, z0.d
; SVE-NEXT: str h0, [x0]
-; SVE-NEXT: add sp, sp, #16
; SVE-NEXT: ret
;
; SVE2-LABEL: test_copysign_bf16:
; SVE2: // %bb.0:
-; SVE2-NEXT: sub sp, sp, #16
-; SVE2-NEXT: .cfi_def_cfa_offset 16
-; SVE2-NEXT: ldr h0, [x0]
+; SVE2-NEXT: mov z0.h, #32767 // =0x7fff
; SVE2-NEXT: ldr h1, [x1]
-; SVE2-NEXT: fmov w8, s0
-; SVE2-NEXT: str h1, [sp, #12]
-; SVE2-NEXT: ldrb w9, [sp, #13]
-; SVE2-NEXT: and w8, w8, #0x7fff
-; SVE2-NEXT: tst w9, #0x80
-; SVE2-NEXT: fmov s0, w8
-; SVE2-NEXT: eor w8, w8, #0x8000
-; SVE2-NEXT: fmov s1, w8
-; SVE2-NEXT: fcsel h0, h1, h0, ne
-; SVE2-NEXT: str h0, [x0]
-; SVE2-NEXT: add sp, sp, #16
+; SVE2-NEXT: ldr h2, [x0]
+; SVE2-NEXT: bsl z2.d, z2.d, z1.d, z0.d
+; SVE2-NEXT: str h2, [x0]
; SVE2-NEXT: ret
;
; NONEON-NOSVE-LABEL: test_copysign_bf16:
diff --git a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
index 624e4ef03f440..8510f16ba52c9 100644
--- a/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-fcopysign.ll
@@ -173,6 +173,18 @@ define <vscale x 2 x float> @copysign_nxv2f32_nxv2f64(<vscale x 2 x float> %a, <
ret <vscale x 2 x float> %r
}
+define <vscale x 2 x float> @copysign_nxv2f32_nxv2bf16(<vscale x 2 x float> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv2f32_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z2.s, #0x7fffffff
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %tmp0 = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x float>
+ %r = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %tmp0)
+ ret <vscale x 2 x float> %r
+}
+
;
; llvm.copysign.nxv4f32
;
@@ -215,6 +227,18 @@ define <vscale x 4 x float> @copysign_nxv4f32_nxv4f64(<vscale x 4 x float> %a, <
ret <vscale x 4 x float> %r
}
+define <vscale x 4 x float> @copysign_nxv4f32_nxv4bf16(<vscale x 4 x float> %a, <vscale x 4 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv4f32_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z2.s, #0x7fffffff
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %tmp0 = fpext <vscale x 4 x bfloat> %b to <vscale x 4 x float>
+ %r = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %tmp0)
+ ret <vscale x 4 x float> %r
+}
+
;
; llvm.copysign.nxv2f64
;
@@ -255,9 +279,163 @@ define <vscale x 2 x double> @copysign_nxv2f64_nxv2f64(<vscale x 2 x double> %a,
ret <vscale x 2 x double> %r
}
+define <vscale x 2 x double> @copysign_nxv2f64_nxv2bf16(<vscale x 2 x double> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv2f64_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsl z1.s, z1.s, #16
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z2.d, #0x7fffffffffffffff
+; CHECK-NEXT: fcvt z1.d, p0/m, z1.s
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %b.ext = fpext <vscale x 2 x bfloat> %b to <vscale x 2 x double>
+ %r = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b.ext)
+ ret <vscale x 2 x double> %r
+}
+
+;
+; llvm.copysign.nxv2bf16
+;
+
+define <vscale x 2 x bfloat> @copysign_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv2bf16_nxv2bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
+ ret <vscale x 2 x bfloat> %r
+}
+
+define <vscale x 2 x bfloat> @copysign_nxv2bf16_nxv2f32(<vscale x 2 x bfloat> %a, <vscale x 2 x float> %b) {
+; CHECK-LABEL: copysign_nxv2bf16_nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %tmp0 = fptrunc <vscale x 2 x float> %b to <vscale x 2 x bfloat>
+ %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %tmp0)
+ ret <vscale x 2 x bfloat> %r
+}
+
+define <vscale x 2 x bfloat> @copysign_nxv2bf16_nxv2f64(<vscale x 2 x bfloat> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: copysign_nxv2bf16_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT: fcvtx z1.s, p0/m, z1.d
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %tmp0 = fptrunc <vscale x 2 x double> %b to <vscale x 2 x bfloat>
+ %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %tmp0)
+ ret <vscale x 2 x bfloat> %r
+}
+
+;
+; llvm.copysign.nxv4bf16
+;
+
+define <vscale x 4 x bfloat> @copysign_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv4bf16_nxv4bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
+ ret <vscale x 4 x bfloat> %r
+}
+
+define <vscale x 4 x bfloat> @copysign_nxv4bf16_nxv4f32(<vscale x 4 x bfloat> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: copysign_nxv4bf16_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %b.trunc = fptrunc <vscale x 4 x float> %b to <vscale x 4 x bfloat>
+ %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b.trunc)
+ ret <vscale x 4 x bfloat> %r
+}
+
+define <vscale x 4 x bfloat> @copysign_nxv4bf16_nxv4f64(<vscale x 4 x bfloat> %a, <vscale x 4 x double> %b) {
+; CHECK-LABEL: copysign_nxv4bf16_nxv4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov z3.h, #32767 // =0x7fff
+; CHECK-NEXT: fcvtx z2.s, p0/m, z2.d
+; CHECK-NEXT: fcvtx z1.s, p0/m, z1.d
+; CHECK-NEXT: bfcvt z2.h, p0/m, z2.s
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z3.d
+; CHECK-NEXT: ret
+ %b.trunc = fptrunc <vscale x 4 x double> %b to <vscale x 4 x bfloat>
+ %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b.trunc)
+ ret <vscale x 4 x bfloat> %r
+}
+
+;
+; llvm.copysign.nxv8bf16
+;
+
+define <vscale x 8 x bfloat> @copysign_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
+; CHECK-LABEL: copysign_nxv8bf16_nxv8bf16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+ ret <vscale x 8 x bfloat> %r
+}
+
+define <vscale x 8 x bfloat> @copysign_nxv8bf16_nxv8f32(<vscale x 8 x bfloat> %a, <vscale x 8 x float> %b) {
+; CHECK-LABEL: copysign_nxv8bf16_nxv8f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov z3.h, #32767 // =0x7fff
+; CHECK-NEXT: bfcvt z2.h, p0/m, z2.s
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z3.d
+; CHECK-NEXT: ret
+ %b.trunc = fptrunc <vscale x 8 x float> %b to <vscale x 8 x bfloat>
+ %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b.trunc)
+ ret <vscale x 8 x bfloat> %r
+}
+
+define <vscale x 8 x bfloat> @copysign_nxv8bf16_nxv8f64(<vscale x 8 x bfloat> %a, <vscale x 8 x double> %b) {
+; CHECK-LABEL: copysign_nxv8bf16_nxv8f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: fcvtx z4.s, p0/m, z4.d
+; CHECK-NEXT: fcvtx z3.s, p0/m, z3.d
+; CHECK-NEXT: fcvtx z2.s, p0/m, z2.d
+; CHECK-NEXT: fcvtx z1.s, p0/m, z1.d
+; CHECK-NEXT: bfcvt z4.h, p0/m, z4.s
+; CHECK-NEXT: bfcvt z3.h, p0/m, z3.s
+; CHECK-NEXT: bfcvt z2.h, p0/m, z2.s
+; CHECK-NEXT: bfcvt z1.h, p0/m, z1.s
+; CHECK-NEXT: uzp1 z3.s, z3.s, z4.s
+; CHECK-NEXT: uzp1 z1.s, z1.s, z2.s
+; CHECK-NEXT: mov z2.h, #32767 // =0x7fff
+; CHECK-NEXT: uzp1 z1.h, z1.h, z3.h
+; CHECK-NEXT: bsl z0.d, z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+ %b.trunc = fptrunc <vscale x 8 x double> %b to <vscale x 8 x bfloat>
+ %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b.trunc)
+ ret <vscale x 8 x bfloat> %r
+}
+
declare <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
declare <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
declare <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
declare <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
declare <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
+declare <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
+declare <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
+declare <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
More information about the llvm-commits
mailing list