[llvm] f6c2322 - [RISCV] Promote fixed-length bf16 arith vector ops with zvfbfmin (#112393)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 15 14:49:08 PDT 2024
Author: Luke Lau
Date: 2024-10-15T22:49:05+01:00
New Revision: f6c23222a4fe7291a7464460216aaad8f778947b
URL: https://github.com/llvm/llvm-project/commit/f6c23222a4fe7291a7464460216aaad8f778947b
DIFF: https://github.com/llvm/llvm-project/commit/f6c23222a4fe7291a7464460216aaad8f778947b.diff
LOG: [RISCV] Promote fixed-length bf16 arith vector ops with zvfbfmin (#112393)
The aim is to have the same set of promotions on fixed-length bf16
vectors as on fixed-length f16 vectors, and then deduplicate them
similarly to what was done for scalable vectors.
It looks like fneg/fabs/fcopysign end up getting expanded rather than
promoted, because fsub is now legal and their default operation action is
Expand.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cde690793f0702..bf333b7b790167 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1379,7 +1379,14 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(
{ISD::VP_MERGE, ISD::VP_SELECT, ISD::VSELECT, ISD::SELECT}, VT,
Custom);
- // TODO: Promote to fp32.
+ MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ // Don't promote f16 vector operations to f32 if f32 vector type is
+ // not legal.
+ // TODO: could split the f16 vector into two vectors and do promotion.
+ if (!isTypeLegal(F32VecVT))
+ continue;
+ setOperationPromotedToType(ZvfhminZvfbfminPromoteOps, VT, F32VecVT);
+ // TODO: Promote VP ops to fp32.
continue;
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 7ecf8af54c8dc0..c24ade1e6d8eff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1,8 +1,52 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+
+define void @fadd_v8bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fadd_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fadd <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @fadd_v6bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fadd_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fadd <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
define void @fadd_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fadd_v8f16:
@@ -97,6 +141,49 @@ define void @fadd_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @fsub_v8bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fsub_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fsub <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @fsub_v6bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fsub_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fsub <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fsub_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fsub_v8f16:
; ZVFH: # %bb.0:
@@ -190,6 +277,49 @@ define void @fsub_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @fmul_v8bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fmul_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fmul <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @fmul_v6bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fmul_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fmul <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fmul_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fmul_v8f16:
; ZVFH: # %bb.0:
@@ -283,6 +413,49 @@ define void @fmul_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @fdiv_v8bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fdiv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fdiv <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @fdiv_v6bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fdiv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fdiv <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fdiv_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fdiv_v8f16:
; ZVFH: # %bb.0:
@@ -376,6 +549,36 @@ define void @fdiv_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @fneg_v8bf16(ptr %x) {
+; CHECK-LABEL: fneg_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = fneg <8 x bfloat> %a
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @fneg_v6bf16(ptr %x) {
+; CHECK-LABEL: fneg_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = fneg <6 x bfloat> %a
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @fneg_v8f16(ptr %x) {
; ZVFH-LABEL: fneg_v8f16:
; ZVFH: # %bb.0:
@@ -450,6 +653,38 @@ define void @fneg_v2f64(ptr %x) {
ret void
}
+define void @fabs_v8bf16(ptr %x) {
+; CHECK-LABEL: fabs_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.fabs.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @fabs_v6bf16(ptr %x) {
+; CHECK-LABEL: fabs_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.fabs.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @fabs_v8f16(ptr %x) {
; ZVFH-LABEL: fabs_v8f16:
; ZVFH: # %bb.0:
@@ -473,7 +708,6 @@ define void @fabs_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
define void @fabs_v6f16(ptr %x) {
; ZVFH-LABEL: fabs_v6f16:
@@ -498,7 +732,6 @@ define void @fabs_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.fabs.v6f16(<6 x half>)
define void @fabs_v4f32(ptr %x) {
; CHECK-LABEL: fabs_v4f32:
@@ -513,7 +746,6 @@ define void @fabs_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
define void @fabs_v2f64(ptr %x) {
; CHECK-LABEL: fabs_v2f64:
@@ -528,7 +760,48 @@ define void @fabs_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.fabs.v2f64(<2 x double>)
+
+define void @copysign_v8bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: copysign_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @copysign_v6bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: copysign_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = call <6 x bfloat> @llvm.copysign.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b)
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
define void @copysign_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_v8f16:
@@ -558,7 +831,6 @@ define void @copysign_v8f16(ptr %x, ptr %y) {
store <8 x half> %c, ptr %x
ret void
}
-declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>)
define void @copysign_v6f16(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_v6f16:
@@ -590,7 +862,6 @@ define void @copysign_v6f16(ptr %x, ptr %y) {
store <6 x half> %c, ptr %x
ret void
}
-declare <6 x half> @llvm.copysign.v6f16(<6 x half>, <6 x half>)
define void @copysign_v4f32(ptr %x, ptr %y) {
; CHECK-LABEL: copysign_v4f32:
@@ -607,7 +878,6 @@ define void @copysign_v4f32(ptr %x, ptr %y) {
store <4 x float> %c, ptr %x
ret void
}
-declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)
define void @copysign_v2f64(ptr %x, ptr %y) {
; CHECK-LABEL: copysign_v2f64:
@@ -624,7 +894,52 @@ define void @copysign_v2f64(ptr %x, ptr %y) {
store <2 x double> %c, ptr %x
ret void
}
-declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)
+
+define void @copysign_vf_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: copysign_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @copysign_vf_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: copysign_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = call <6 x bfloat> @llvm.copysign.v6bf16(<6 x bfloat> %a, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @copysign_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: copysign_vf_v8f16:
@@ -720,6 +1035,52 @@ define void @copysign_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @copysign_neg_v8bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: copysign_neg_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a2
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fneg <8 x bfloat> %b
+ %d = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @copysign_neg_v6bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: copysign_neg_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a2
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fneg <6 x bfloat> %b
+ %d = call <6 x bfloat> @llvm.copysign.v6bf16(<6 x bfloat> %a, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @copysign_neg_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_v8f16:
; ZVFH: # %bb.0:
@@ -818,6 +1179,56 @@ define void @copysign_neg_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @copysign_neg_trunc_v4bf16_v4f32(ptr %x, ptr %y) {
+; CHECK-LABEL: copysign_neg_trunc_v4bf16_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle32.v v9, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v9
+; CHECK-NEXT: vxor.vx v9, v10, a1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <4 x bfloat>, ptr %x
+ %b = load <4 x float>, ptr %y
+ %c = fneg <4 x float> %b
+ %d = fptrunc <4 x float> %c to <4 x bfloat>
+ %e = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> %a, <4 x bfloat> %d)
+ store <4 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @copysign_neg_trunc_v3bf16_v3f32(ptr %x, ptr %y) {
+; CHECK-LABEL: copysign_neg_trunc_v3bf16_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle32.v v9, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v9
+; CHECK-NEXT: vxor.vx v9, v10, a1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <3 x bfloat>, ptr %x
+ %b = load <3 x float>, ptr %y
+ %c = fneg <3 x float> %b
+ %d = fptrunc <3 x float> %c to <3 x bfloat>
+ %e = call <3 x bfloat> @llvm.copysign.v3bf16(<3 x bfloat> %a, <3 x bfloat> %d)
+ store <3 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @copysign_neg_trunc_v4f16_v4f32(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_trunc_v4f16_v4f32:
; ZVFH: # %bb.0:
@@ -851,7 +1262,6 @@ define void @copysign_neg_trunc_v4f16_v4f32(ptr %x, ptr %y) {
store <4 x half> %e, ptr %x
ret void
}
-declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_trunc_v3f16_v3f32:
@@ -890,7 +1300,6 @@ define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
store <3 x half> %e, ptr %x
ret void
}
-declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>)
define void @copysign_neg_ext_v2f64_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: copysign_neg_ext_v2f64_v2f32:
@@ -912,6 +1321,43 @@ define void @copysign_neg_ext_v2f64_v2f32(ptr %x, ptr %y) {
ret void
}
+define void @sqrt_v8bf16(ptr %x) {
+; CHECK-LABEL: sqrt_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsqrt.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.sqrt.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @sqrt_v6bf16(ptr %x) {
+; CHECK-LABEL: sqrt_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsqrt.v v8, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.sqrt.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @sqrt_v8f16(ptr %x) {
; ZVFH-LABEL: sqrt_v8f16:
; ZVFH: # %bb.0:
@@ -937,7 +1383,6 @@ define void @sqrt_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
define void @sqrt_v6f16(ptr %x) {
; ZVFH-LABEL: sqrt_v6f16:
@@ -965,7 +1410,6 @@ define void @sqrt_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.sqrt.v6f16(<6 x half>)
define void @sqrt_v4f32(ptr %x) {
; CHECK-LABEL: sqrt_v4f32:
@@ -980,7 +1424,6 @@ define void @sqrt_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
define void @sqrt_v2f64(ptr %x) {
; CHECK-LABEL: sqrt_v2f64:
@@ -995,7 +1438,55 @@ define void @sqrt_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
+
+define void @fma_v8bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fma_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %d = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fma_v6bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fma_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %d = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fma_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fma_v8f16:
@@ -1030,7 +1521,6 @@ define void @fma_v8f16(ptr %x, ptr %y, ptr %z) {
store <8 x half> %d, ptr %x
ret void
}
-declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fma_v6f16:
@@ -1066,7 +1556,6 @@ define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
store <6 x half> %d, ptr %x
ret void
}
-declare <6 x half> @llvm.fma.v6f16(<6 x half>, <6 x half>, <6 x half>)
define void @fma_v4f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v4f32:
@@ -1085,7 +1574,6 @@ define void @fma_v4f32(ptr %x, ptr %y, ptr %z) {
store <4 x float> %d, ptr %x
ret void
}
-declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
define void @fma_v2f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v2f64:
@@ -1104,7 +1592,61 @@ define void @fma_v2f64(ptr %x, ptr %y, ptr %z) {
store <2 x double> %d, ptr %x
ret void
}
-declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define void @fmsub_v8bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fmsub_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %neg = fneg <8 x bfloat> %c
+ %d = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %neg)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmsub_v6bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fmsub_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %neg = fneg <6 x bfloat> %c
+ %d = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %neg)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fmsub_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmsub_v8f16:
@@ -1220,6 +1762,27 @@ define void @fnmadd_v2f64(ptr %x, ptr %y, ptr %z) {
ret void
}
+define void @fadd_v16bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fadd_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fadd <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fadd_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fadd_v16f16:
; ZVFH: # %bb.0:
@@ -1282,6 +1845,27 @@ define void @fadd_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fsub_v16bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fsub_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fsub <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fsub_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fsub_v16f16:
; ZVFH: # %bb.0:
@@ -1344,6 +1928,27 @@ define void @fsub_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fmul_v16bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fmul_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fmul <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fmul_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fmul_v16f16:
; ZVFH: # %bb.0:
@@ -1406,6 +2011,27 @@ define void @fmul_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fdiv_v16bf16(ptr %x, ptr %y) {
+; CHECK-LABEL: fdiv_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fdiv <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fdiv_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fdiv_v16f16:
; ZVFH: # %bb.0:
@@ -1468,6 +2094,21 @@ define void @fdiv_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fneg_v16bf16(ptr %x) {
+; CHECK-LABEL: fneg_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = fneg <16 x bfloat> %a
+ store <16 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @fneg_v16f16(ptr %x) {
; ZVFH-LABEL: fneg_v16f16:
; ZVFH: # %bb.0:
@@ -1519,6 +2160,30 @@ define void @fneg_v4f64(ptr %x) {
ret void
}
+define void @fma_v16bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fma_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vle16.v v12, (a1)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v20, v10
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v20, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = load <16 x bfloat>, ptr %z
+ %d = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c)
+ store <16 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fma_v16f16:
; ZVFH: # %bb.0:
@@ -1552,7 +2217,6 @@ define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
store <16 x half> %d, ptr %x
ret void
}
-declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>)
define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v8f32:
@@ -1571,7 +2235,6 @@ define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
store <8 x float> %d, ptr %x
ret void
}
-declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
define void @fma_v4f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v4f64:
@@ -1590,7 +2253,53 @@ define void @fma_v4f64(ptr %x, ptr %y, ptr %z) {
store <4 x double> %d, ptr %x
ret void
}
-declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
+
+define void @fadd_vf_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fadd_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fadd <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fadd_vf_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fadd_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fadd <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fadd_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fadd_vf_v8f16:
@@ -1687,6 +2396,53 @@ define void @fadd_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fadd_fv_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fadd_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fadd <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fadd_fv_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fadd_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fadd <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fadd_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fadd_fv_v8f16:
; ZVFH: # %bb.0:
@@ -1782,6 +2538,53 @@ define void @fadd_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fsub_vf_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fsub_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fsub <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fsub_vf_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fsub_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fsub <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fsub_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fsub_vf_v8f16:
; ZVFH: # %bb.0:
@@ -1877,6 +2680,53 @@ define void @fsub_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fsub_fv_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fsub_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fsub <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fsub_fv_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fsub_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fsub <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fsub_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fsub_fv_v8f16:
; ZVFH: # %bb.0:
@@ -1972,6 +2822,53 @@ define void @fsub_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fmul_vf_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fmul_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fmul <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmul_vf_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fmul_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fmul <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fmul_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fmul_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2067,6 +2964,53 @@ define void @fmul_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fmul_fv_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fmul_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fmul <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmul_fv_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fmul_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fmul <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fmul_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fmul_fv_v8f16:
; ZVFH: # %bb.0:
@@ -2162,6 +3106,53 @@ define void @fmul_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fdiv_vf_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fdiv_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fdiv <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fdiv_vf_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fdiv_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fdiv <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fdiv_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fdiv_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2257,6 +3248,53 @@ define void @fdiv_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fdiv_fv_v8bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fdiv_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fdiv <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fdiv_fv_v6bf16(ptr %x, bfloat %y) {
+; CHECK-LABEL: fdiv_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fdiv <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fdiv_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fdiv_fv_v8f16:
; ZVFH: # %bb.0:
@@ -2352,6 +3390,59 @@ define void @fdiv_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fma_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
+; CHECK-LABEL: fma_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <8 x bfloat> %c, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %e = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %d, <8 x bfloat> %b)
+ store <8 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @fma_vf_v6bf16(ptr %x, ptr %y, bfloat %z) {
+; CHECK-LABEL: fma_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <6 x bfloat> %c, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %e = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %d, <6 x bfloat> %b)
+ store <6 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @fma_vf_v8f16(ptr %x, ptr %y, half %z) {
; ZVFH-LABEL: fma_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2459,6 +3550,59 @@ define void @fma_vf_v2f64(ptr %x, ptr %y, double %z) {
ret void
}
+define void @fma_fv_v8bf16(ptr %x, ptr %y, bfloat %z) {
+; CHECK-LABEL: fma_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <8 x bfloat> %c, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %e = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %d, <8 x bfloat> %a, <8 x bfloat> %b)
+ store <8 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @fma_fv_v6bf16(ptr %x, ptr %y, bfloat %z) {
+; CHECK-LABEL: fma_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <6 x bfloat> %c, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %e = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %d, <6 x bfloat> %a, <6 x bfloat> %b)
+ store <6 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @fma_fv_v8f16(ptr %x, ptr %y, half %z) {
; ZVFH-LABEL: fma_fv_v8f16:
; ZVFH: # %bb.0:
@@ -2566,6 +3710,65 @@ define void @fma_fv_v2f64(ptr %x, ptr %y, double %z) {
ret void
}
+define void @fmsub_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
+; CHECK-LABEL: fmsub_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a2, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vmv.v.x v10, a2
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <8 x bfloat> %c, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %neg = fneg <8 x bfloat> %b
+ %e = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %d, <8 x bfloat> %neg)
+ store <8 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @fmsub_vf_v6bf16(ptr %x, ptr %y, bfloat %z) {
+; CHECK-LABEL: fmsub_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a2, fa0
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a2
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <6 x bfloat> %c, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %neg = fneg <6 x bfloat> %b
+ %e = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %d, <6 x bfloat> %neg)
+ store <6 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @fmsub_vf_v8f16(ptr %x, ptr %y, half %z) {
; ZVFH-LABEL: fmsub_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2721,13 +3924,64 @@ define void @fnmadd_fv_v2f64(ptr %x, ptr %y, double %z) {
ret void
}
+define void @trunc_v8bf16(ptr %x) {
+; CHECK-LABEL: trunc_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.trunc.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @trunc_v6bf16(ptr %x) {
+; CHECK-LABEL: trunc_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.trunc.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @trunc_v8f16(ptr %x) {
; ZVFH-LABEL: trunc_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI115_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI115_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI171_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI171_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -2760,15 +4014,14 @@ define void @trunc_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.trunc.v8f16(<8 x half>)
define void @trunc_v6f16(ptr %x) {
; ZVFH-LABEL: trunc_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI116_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI116_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI172_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI172_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -2803,7 +4056,6 @@ define void @trunc_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.trunc.v6f16(<6 x half>)
define void @trunc_v4f32(ptr %x) {
; CHECK-LABEL: trunc_v4f32:
@@ -2825,15 +4077,14 @@ define void @trunc_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
define void @trunc_v2f64(ptr %x) {
; CHECK-LABEL: trunc_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI118_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI118_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI174_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI174_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -2847,15 +4098,69 @@ define void @trunc_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
+
+define void @ceil_v8bf16(ptr %x) {
+; CHECK-LABEL: ceil_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 3
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.ceil.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @ceil_v6bf16(ptr %x) {
+; CHECK-LABEL: ceil_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 3
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.ceil.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
define void @ceil_v8f16(ptr %x) {
; ZVFH-LABEL: ceil_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI119_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI119_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI177_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI177_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 3
@@ -2892,15 +4197,14 @@ define void @ceil_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.ceil.v8f16(<8 x half>)
define void @ceil_v6f16(ptr %x) {
; ZVFH-LABEL: ceil_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI120_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI120_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI178_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI178_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -2939,7 +4243,6 @@ define void @ceil_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.ceil.v6f16(<6 x half>)
define void @ceil_v4f32(ptr %x) {
; CHECK-LABEL: ceil_v4f32:
@@ -2963,15 +4266,14 @@ define void @ceil_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
define void @ceil_v2f64(ptr %x) {
; CHECK-LABEL: ceil_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI122_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI122_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI180_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI180_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a1, 3
@@ -2987,15 +4289,69 @@ define void @ceil_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
+
+define void @floor_v8bf16(ptr %x) {
+; CHECK-LABEL: floor_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 2
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.floor.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @floor_v6bf16(ptr %x) {
+; CHECK-LABEL: floor_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 2
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.floor.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
define void @floor_v8f16(ptr %x) {
; ZVFH-LABEL: floor_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI123_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI123_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI183_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI183_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 2
@@ -3032,15 +4388,14 @@ define void @floor_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.floor.v8f16(<8 x half>)
define void @floor_v6f16(ptr %x) {
; ZVFH-LABEL: floor_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI124_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI124_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI184_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI184_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -3079,7 +4434,6 @@ define void @floor_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.floor.v6f16(<6 x half>)
define void @floor_v4f32(ptr %x) {
; CHECK-LABEL: floor_v4f32:
@@ -3103,15 +4457,14 @@ define void @floor_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.floor.v4f32(<4 x float>)
define void @floor_v2f64(ptr %x) {
; CHECK-LABEL: floor_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI126_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI126_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI186_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI186_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a1, 2
@@ -3127,15 +4480,69 @@ define void @floor_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.floor.v2f64(<2 x double>)
+
+define void @round_v8bf16(ptr %x) {
+; CHECK-LABEL: round_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 4
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.round.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @round_v6bf16(ptr %x) {
+; CHECK-LABEL: round_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 4
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.round.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
define void @round_v8f16(ptr %x) {
; ZVFH-LABEL: round_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI127_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI127_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI189_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI189_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 4
@@ -3172,15 +4579,14 @@ define void @round_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.round.v8f16(<8 x half>)
define void @round_v6f16(ptr %x) {
; ZVFH-LABEL: round_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI128_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI128_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI190_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI190_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -3219,7 +4625,6 @@ define void @round_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.round.v6f16(<6 x half>)
define void @round_v4f32(ptr %x) {
; CHECK-LABEL: round_v4f32:
@@ -3243,15 +4648,14 @@ define void @round_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.round.v4f32(<4 x float>)
define void @round_v2f64(ptr %x) {
; CHECK-LABEL: round_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI130_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI130_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI192_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI192_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a1, 4
@@ -3267,15 +4671,39 @@ define void @round_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.round.v2f64(<2 x double>)
+
+define void @rint_v8bf16(ptr %x) {
+; CHECK-LABEL: rint_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.rint.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
define void @rint_v8f16(ptr %x) {
; ZVFH-LABEL: rint_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI131_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI131_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI194_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI194_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -3308,7 +4736,6 @@ define void @rint_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.rint.v8f16(<8 x half>)
define void @rint_v4f32(ptr %x) {
; CHECK-LABEL: rint_v4f32:
@@ -3330,15 +4757,14 @@ define void @rint_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.rint.v4f32(<4 x float>)
define void @rint_v2f64(ptr %x) {
; CHECK-LABEL: rint_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI133_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI133_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI196_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI196_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -3352,15 +4778,41 @@ define void @rint_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.rint.v2f64(<2 x double>)
+
+define void @nearbyint_v8bf16(ptr %x) {
+; CHECK-LABEL: nearbyint_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: frflags a1
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: fsflags a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.nearbyint.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
define void @nearbyint_v8f16(ptr %x) {
; ZVFH-LABEL: nearbyint_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI134_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI134_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI198_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI198_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a1
@@ -3397,7 +4849,6 @@ define void @nearbyint_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>)
define void @nearbyint_v4f32(ptr %x) {
; CHECK-LABEL: nearbyint_v4f32:
@@ -3421,15 +4872,14 @@ define void @nearbyint_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)
define void @nearbyint_v2f64(ptr %x) {
; CHECK-LABEL: nearbyint_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI136_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI136_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI200_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI200_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a1
@@ -3445,7 +4895,65 @@ define void @nearbyint_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
+
+define void @fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fmuladd_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %d = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmuladd_v6bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fmuladd_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %d = call <6 x bfloat> @llvm.fmuladd.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmuladd_v8f16:
@@ -3485,7 +4993,6 @@ define void @fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
store <8 x half> %d, ptr %x
ret void
}
-declare <8 x half> @llvm.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>)
define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmuladd_v6f16:
@@ -3526,7 +5033,6 @@ define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
store <6 x half> %d, ptr %x
ret void
}
-declare <6 x half> @llvm.fmuladd.v6f16(<6 x half>, <6 x half>, <6 x half>)
define void @fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fmuladd_v4f32:
@@ -3545,7 +5051,6 @@ define void @fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
store <4 x float> %d, ptr %x
ret void
}
-declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
define void @fmuladd_v2f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fmuladd_v2f64:
@@ -3564,7 +5069,67 @@ define void @fmuladd_v2f64(ptr %x, ptr %y, ptr %z) {
store <2 x double> %d, ptr %x
ret void
}
-declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define void @fmsub_fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fmsub_fmuladd_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %neg = fneg <8 x bfloat> %c
+ %d = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %neg)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmsub_fmuladd_v6bf16(ptr %x, ptr %y, ptr %z) {
+; CHECK-LABEL: fmsub_fmuladd_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %neg = fneg <6 x bfloat> %c
+ %d = call <6 x bfloat> @llvm.fmuladd.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %neg)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fmsub_fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmsub_fmuladd_v8f16:
More information about the llvm-commits
mailing list