[llvm] ae82a8c - [RISCV] Add support for scalable vector fneg using vfsgnjn.vv
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 28 09:12:12 PST 2021
Author: Craig Topper
Date: 2021-01-28T09:11:49-08:00
New Revision: ae82a8c86331ea400ed52e4e4a12211995e847b0
URL: https://github.com/llvm/llvm-project/commit/ae82a8c86331ea400ed52e4e4a12211995e847b0
DIFF: https://github.com/llvm/llvm-project/commit/ae82a8c86331ea400ed52e4e4a12211995e847b0.diff
LOG: [RISCV] Add support for scalable vector fneg using vfsgnjn.vv
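This lowers fneg on scalable vector types by selecting vfsgnjn.vv with
the source register used for both operands. vfsgnjn.vv vd, vs2, vs1
writes each element with the magnitude bits of vs2[i] and the inverted
sign bit of vs1[i], so passing the same register for vs2 and vs1 flips
the sign bit of every element. That is exactly fneg: it is bit-exact,
negates NaNs too, and raises no floating-point exceptions. An
illustrative C model of one SEW=32 element (a sketch, not part of the
patch):

    #include <stdint.h>
    #include <string.h>

    /* Per-element model of vfsgnjn.vv vd, vs2, vs1 at SEW=32:
       magnitude from 'mag' (vs2[i]), sign = inverse of sign('sgn')
       (vs1[i]).  fsgnjn32(x, x) == -x, bit for bit. */
    static inline float fsgnjn32(float mag, float sgn) {
        uint32_t m, s, r;
        memcpy(&m, &mag, sizeof m);
        memcpy(&s, &sgn, sizeof s);
        r = (m & 0x7fffffffu) | (~s & 0x80000000u);
        float out;
        memcpy(&out, &r, sizeof r);
        return out;
    }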
Reviewed By: frasercrmck
Differential Revision: https://reviews.llvm.org/D95568
Added:
llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index d86f157f216d..b631722e9efa 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -514,6 +514,14 @@ defm "" : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
defm "" : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
defm "" : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;
+// 14.10. Vector Floating-Point Sign-Injection Instructions
+// Handle fneg with VFSGNJN using the same input for both operands.
+foreach vti = AllFloatVectors in {
+ def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
+ (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
+ vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
+}
+
// 14.11. Vector Floating-Point Compare Instructions
defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
new file mode 100644
index 000000000000..182b5a1244cf
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
@@ -0,0 +1,155 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 1 x half> %va
+ ret <vscale x 1 x half> %vb
+}
+
+define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 2 x half> %va
+ ret <vscale x 2 x half> %vb
+}
+
+define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 4 x half> %va
+ ret <vscale x 4 x half> %vb
+}
+
+define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 8 x half> %va
+ ret <vscale x 8 x half> %vb
+}
+
+define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 16 x half> %va
+ ret <vscale x 16 x half> %vb
+}
+
+define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 32 x half> %va
+ ret <vscale x 32 x half> %vb
+}
+
+define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 1 x float> %va
+ ret <vscale x 1 x float> %vb
+}
+
+define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 2 x float> %va
+ ret <vscale x 2 x float> %vb
+}
+
+define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 4 x float> %va
+ ret <vscale x 4 x float> %vb
+}
+
+define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 8 x float> %va
+ ret <vscale x 8 x float> %vb
+}
+
+define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 16 x float> %va
+ ret <vscale x 16 x float> %vb
+}
+
+define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 1 x double> %va
+ ret <vscale x 1 x double> %vb
+}
+
+define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 2 x double> %va
+ ret <vscale x 2 x double> %vb
+}
+
+define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 4 x double> %va
+ ret <vscale x 4 x double> %vb
+}
+
+define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT: ret
+ %vb = fneg <vscale x 8 x double> %va
+ ret <vscale x 8 x double> %vb
+}
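The new test can be run standalone with the RUN lines above, e.g.
(assuming an llc built with the RISC-V target enabled is on PATH):

    llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
        -target-abi=lp64d -verify-machineinstrs \
        llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll -o -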