[llvm] ecf260e - [RISCV] Merge bf16 tests into their respective tests. NFC

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 12 03:44:38 PDT 2024


Author: Luke Lau
Date: 2024-09-12T18:41:46+08:00
New Revision: ecf260e1dd8fb1c3d81e6f8af99907ae250058cb

URL: https://github.com/llvm/llvm-project/commit/ecf260e1dd8fb1c3d81e6f8af99907ae250058cb
DIFF: https://github.com/llvm/llvm-project/commit/ecf260e1dd8fb1c3d81e6f8af99907ae250058cb.diff

LOG: [RISCV] Merge bf16 tests into their respective tests. NFC

I added these in #108245, but given the sheer number of tests that
will need to be added to cover bf16 promotion to f32, it seems better
to keep them in one place and avoid an explosion of files.
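
For reference, all three operations covered here (fabs, fneg and
copysign) lower to plain integer sign-bit arithmetic under +zvfbfmin,
which is what the autogenerated CHECK lines below verify. A minimal
sketch of the pattern, derived from the generated assembly (vsetvli
elided; annotations are mine):

    # fabs: clear the sign bit (bit 15) of each bf16 element
    lui     a0, 8          # a0 = 8 << 12 = 0x8000
    addi    a0, a0, -1     # a0 = 0x7fff
    vand.vx v8, v8, a0     # v8 &= 0x7fff

    # fneg: flip the sign bit
    lui     a0, 8          # a0 = 0x8000
    vxor.vx v8, v8, a0     # v8 ^= 0x8000

    # copysign: magnitude from the first operand, sign from the second
    lui     a0, 8          # a0 = 0x8000
    vand.vx v9, v9, a0     # isolate sign bits of the sign operand
    addi    a0, a0, -1     # a0 = 0x7fff
    vand.vx v8, v8, a0     # isolate magnitude of the other operand
    vor.vv  v8, v8, v9     # combine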

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode-bf16.ll
    llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode-bf16.ll
    llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode-bf16.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode-bf16.ll
deleted file mode 100644
index 1c6a57d99dd0af..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode-bf16.ll
+++ /dev/null
@@ -1,75 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s
-
-define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %v) {
-; CHECK-LABEL: nxv1bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x bfloat> @llvm.fabs.nxv1bf16(<vscale x 1 x bfloat> %v)
-  ret <vscale x 1 x bfloat> %r
-}
-
-define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %v) {
-; CHECK-LABEL: nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat> %v)
-  ret <vscale x 2 x bfloat> %r
-}
-
-define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %v) {
-; CHECK-LABEL: nxv4bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat> %v)
-  ret <vscale x 4 x bfloat> %r
-}
-
-define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %v) {
-; CHECK-LABEL: nxv8bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat> %v)
-  ret <vscale x 8 x bfloat> %r
-}
-
-define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %v) {
-; CHECK-LABEL: nxv16bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x bfloat> @llvm.fabs.nxv16bf16(<vscale x 16 x bfloat> %v)
-  ret <vscale x 16 x bfloat> %r
-}
-
-define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %v) {
-; CHECK-LABEL: nxv32bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x bfloat> @llvm.fabs.nxv32bf16(<vscale x 32 x bfloat> %v)
-  ret <vscale x 32 x bfloat> %r
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
index 4bf9ae16cdaf01..c8313c902697bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
@@ -1,12 +1,88 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %v) {
+; CHECK-LABEL: nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x bfloat> @llvm.fabs.nxv1bf16(<vscale x 1 x bfloat> %v)
+  ret <vscale x 1 x bfloat> %r
+}
+
+define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %v) {
+; CHECK-LABEL: nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x bfloat> @llvm.fabs.nxv2bf16(<vscale x 2 x bfloat> %v)
+  ret <vscale x 2 x bfloat> %r
+}
+
+define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %v) {
+; CHECK-LABEL: nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x bfloat> @llvm.fabs.nxv4bf16(<vscale x 4 x bfloat> %v)
+  ret <vscale x 4 x bfloat> %r
+}
+
+define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %v) {
+; CHECK-LABEL: nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x bfloat> @llvm.fabs.nxv8bf16(<vscale x 8 x bfloat> %v)
+  ret <vscale x 8 x bfloat> %r
+}
+
+define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %v) {
+; CHECK-LABEL: nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x bfloat> @llvm.fabs.nxv16bf16(<vscale x 16 x bfloat> %v)
+  ret <vscale x 16 x bfloat> %r
+}
+
+define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %v) {
+; CHECK-LABEL: nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x bfloat> @llvm.fabs.nxv32bf16(<vscale x 32 x bfloat> %v)
+  ret <vscale x 32 x bfloat> %r
+}
 
 declare <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half>)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode-bf16.ll
deleted file mode 100644
index ee050baee1f791..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode-bf16.ll
+++ /dev/null
@@ -1,87 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s
-
-define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs) {
-; CHECK-LABEL: nxv1bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vand.vx v9, v9, a0
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs)
-  ret <vscale x 1 x bfloat> %r
-}
-
-define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs) {
-; CHECK-LABEL: nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vand.vx v9, v9, a0
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs)
-  ret <vscale x 2 x bfloat> %r
-}
-
-define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs) {
-; CHECK-LABEL: nxv4bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vand.vx v9, v9, a0
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    vor.vv v8, v8, v9
-; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs)
-  ret <vscale x 4 x bfloat> %r
-}
-
-define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs) {
-; CHECK-LABEL: nxv8bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vand.vx v10, v10, a0
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    vor.vv v8, v8, v10
-; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs)
-  ret <vscale x 8 x bfloat> %r
-}
-
-define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs) {
-; CHECK-LABEL: nxv16bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vand.vx v12, v12, a0
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    vor.vv v8, v8, v12
-; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs)
-  ret <vscale x 16 x bfloat> %r
-}
-
-define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs) {
-; CHECK-LABEL: nxv32bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vand.vx v16, v16, a0
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    vor.vv v8, v8, v16
-; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf16(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs)
-  ret <vscale x 32 x bfloat> %r
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
index f37e593798efe5..c1e1450d0b0a2a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfcopysign-sdnode.ll
@@ -1,12 +1,100 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs) {
+; CHECK-LABEL: nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> %vm, <vscale x 1 x bfloat> %vs)
+  ret <vscale x 1 x bfloat> %r
+}
+
+define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs) {
+; CHECK-LABEL: nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> %vm, <vscale x 2 x bfloat> %vs)
+  ret <vscale x 2 x bfloat> %r
+}
+
+define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs) {
+; CHECK-LABEL: nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vand.vx v9, v9, a0
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> %vm, <vscale x 4 x bfloat> %vs)
+  ret <vscale x 4 x bfloat> %r
+}
+
+define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs) {
+; CHECK-LABEL: nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vand.vx v10, v10, a0
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> %vm, <vscale x 8 x bfloat> %vs)
+  ret <vscale x 8 x bfloat> %r
+}
+
+define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs) {
+; CHECK-LABEL: nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vand.vx v12, v12, a0
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vor.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> %vm, <vscale x 16 x bfloat> %vs)
+  ret <vscale x 16 x bfloat> %r
+}
+
+define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs) {
+; CHECK-LABEL: nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vand.vx v16, v16, a0
+; CHECK-NEXT:    addi a0, a0, -1
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf16(<vscale x 32 x bfloat> %vm, <vscale x 32 x bfloat> %vs)
+  ret <vscale x 32 x bfloat> %r
+}
 
 declare <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode-bf16.ll
deleted file mode 100644
index 8fef7f60d99f64..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode-bf16.ll
+++ /dev/null
@@ -1,69 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfbfmin -verify-machineinstrs | FileCheck %s
-
-define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %va) {
-; CHECK-LABEL: nxv1bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %vb = fneg <vscale x 1 x bfloat> %va
-  ret <vscale x 1 x bfloat> %vb
-}
-
-define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %va) {
-; CHECK-LABEL: nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %vb = fneg <vscale x 2 x bfloat> %va
-  ret <vscale x 2 x bfloat> %vb
-}
-
-define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %va) {
-; CHECK-LABEL: nxv4bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %vb = fneg <vscale x 4 x bfloat> %va
-  ret <vscale x 4 x bfloat> %vb
-}
-
-define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %va) {
-; CHECK-LABEL: nxv8bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %vb = fneg <vscale x 8 x bfloat> %va
-  ret <vscale x 8 x bfloat> %vb
-}
-
-define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %va) {
-; CHECK-LABEL: nxv16bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %vb = fneg <vscale x 16 x bfloat> %va
-  ret <vscale x 16 x bfloat> %vb
-}
-
-define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %va) {
-; CHECK-LABEL: nxv32bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vxor.vx v8, v8, a0
-; CHECK-NEXT:    ret
-  %vb = fneg <vscale x 32 x bfloat> %va
-  ret <vscale x 32 x bfloat> %vb
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
index 2991e52d4266a6..b4ec691796a71e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
@@ -1,12 +1,82 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zvfbfmin,+v \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 1 x bfloat> @nxv1bf16(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 1 x bfloat> %va
+  ret <vscale x 1 x bfloat> %vb
+}
+
+define <vscale x 2 x bfloat> @nxv2bf16(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 2 x bfloat> %va
+  ret <vscale x 2 x bfloat> %vb
+}
+
+define <vscale x 4 x bfloat> @nxv4bf16(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 4 x bfloat> %va
+  ret <vscale x 4 x bfloat> %vb
+}
+
+define <vscale x 8 x bfloat> @nxv8bf16(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 8 x bfloat> %va
+  ret <vscale x 8 x bfloat> %vb
+}
+
+define <vscale x 16 x bfloat> @nxv16bf16(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 16 x bfloat> %va
+  ret <vscale x 16 x bfloat> %vb
+}
+
+define <vscale x 32 x bfloat> @nxv32bf16(<vscale x 32 x bfloat> %va) {
+; CHECK-LABEL: nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 32 x bfloat> %va
+  ret <vscale x 32 x bfloat> %vb
+}
 
 define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
 ; ZVFH-LABEL: vfneg_vv_nxv1f16:


        

