[llvm] c29d6c4 - [RISCV] Add patterns for vector widening floating-point add/subtract instructions
via llvm-commits (llvm-commits at lists.llvm.org)
Mon Jan 17 18:51:24 PST 2022
Author: jacquesguan
Date: 2022-01-18T10:33:56+08:00
New Revision: c29d6c410e769938ed5db1090a1b894cf6061a4e
URL: https://github.com/llvm/llvm-project/commit/c29d6c410e769938ed5db1090a1b894cf6061a4e
DIFF: https://github.com/llvm/llvm-project/commit/c29d6c410e769938ed5db1090a1b894cf6061a4e.diff
LOG: [RISCV] Add patterns for vector widening floating-point add/subtract instructions
Add patterns for Vector Widening Floating-Point Add/Subtract Instructions
Differential Revision: https://reviews.llvm.org/D117466
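With these patterns, a fadd or fsub whose operands come from single-use fpext nodes selects directly to a widening instruction rather than going through a separate widening convert before a single-width add or subtract; the _WV/_WF forms cover the case where only one operand is extended. For example, this IR (from the first case in the new vfwadd-sdnode.ll test below):

  %vc = fpext <vscale x 1 x float> %va to <vscale x 1 x double>
  %vd = fpext <vscale x 1 x float> %vb to <vscale x 1 x double>
  %ve = fadd <vscale x 1 x double> %vc, %vd

now selects to a single vfwadd.vv. The one-use restriction in the new fpext_oneuse fragment keeps the fold from duplicating an extend whose result is also needed elsewhere.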
Added:
llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfo.td
llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 8be7132dbd9a..dd1627231db4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1046,6 +1046,11 @@ def anyext_oneuse : PatFrag<(ops node:$A), (anyext node:$A), [{
return N->hasOneUse();
}]>;
+def fpext_oneuse : PatFrag<(ops node:$A),
+ (any_fpextend node:$A), [{
+ return N->hasOneUse();
+}]>;
+
/// Simple arithmetic operations
def : PatGprGpr<add, ADD>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index c52312bfabbe..70aa0febd36f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -413,6 +413,41 @@ multiclass VPatWidenMulAddSDNode_VX<PatFrags extop1, PatFrags extop2, string ins
}
}
+multiclass VPatWidenBinaryFPSDNode_VV_VF<SDNode op, string instruction_name> {
+ foreach vti = AllWidenableFloatVectors in {
+ def : Pat<(op (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+ (!cast<Instruction>(instruction_name#"_VV_"#vti.Vti.LMul.MX)
+ vti.Vti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ def : Pat<(op (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector (SplatPat vti.Vti.ScalarRegClass:$rs1))))),
+ (!cast<Instruction>(instruction_name#"_V"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
+ vti.Vti.RegClass:$rs2, vti.Vti.ScalarRegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ }
+}
+
+multiclass VPatWidenBinaryFPSDNode_WV_WF<SDNode op, string instruction_name> {
+ foreach vti = AllWidenableFloatVectors in {
+ def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+ (!cast<Instruction>(instruction_name#"_WV_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+ (vti.Wti.Vector (fpext_oneuse (vti.Vti.Vector (SplatPat vti.Vti.ScalarRegClass:$rs1))))),
+ (!cast<Instruction>(instruction_name#"_W"#vti.Vti.ScalarSuffix#"_"#vti.Vti.LMul.MX)
+ vti.Wti.RegClass:$rs2, vti.Vti.ScalarRegClass:$rs1,
+ vti.Vti.AVL, vti.Vti.Log2SEW)>;
+ }
+}
+
+multiclass VPatWidenBinaryFPSDNode_VV_VF_WV_WF<SDNode op, string instruction_name> {
+ defm : VPatWidenBinaryFPSDNode_VV_VF<op, instruction_name>;
+ defm : VPatWidenBinaryFPSDNode_WV_WF<op, instruction_name>;
+}
+
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -650,6 +685,10 @@ defm : VPatBinaryFPSDNode_VV_VF<fadd, "PseudoVFADD">;
defm : VPatBinaryFPSDNode_VV_VF<fsub, "PseudoVFSUB">;
defm : VPatBinaryFPSDNode_R_VF<fsub, "PseudoVFRSUB">;
+// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
+defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fadd, "PseudoVFWADD">;
+defm : VPatWidenBinaryFPSDNode_VV_VF_WV_WF<fsub, "PseudoVFWSUB">;
+
// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
defm : VPatBinaryFPSDNode_VV_VF<fmul, "PseudoVFMUL">;
defm : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll
new file mode 100644
index 000000000000..486d51182b65
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-sdnode.ll
@@ -0,0 +1,217 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x double> @vfwadd_vv_nxv1f64(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfwadd_vv_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwadd.vv v10, v8, v9
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 1 x float> %va to <vscale x 1 x double>
+ %vd = fpext <vscale x 1 x float> %vb to <vscale x 1 x double>
+ %ve = fadd <vscale x 1 x double> %vc, %vd
+ ret <vscale x 1 x double> %ve
+}
+
+define <vscale x 1 x double> @vfwadd_vf_nxv1f64(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfwadd_vf_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwadd.vf v9, v8, fa0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = fpext <vscale x 1 x float> %va to <vscale x 1 x double>
+ %vd = fpext <vscale x 1 x float> %splat to <vscale x 1 x double>
+ %ve = fadd <vscale x 1 x double> %vc, %vd
+ ret <vscale x 1 x double> %ve
+}
+
+define <vscale x 1 x double> @vfwadd_wv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfwadd_wv_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wv v10, v8, v9
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 1 x float> %vb to <vscale x 1 x double>
+ %vd = fadd <vscale x 1 x double> %va, %vc
+ ret <vscale x 1 x double> %vd
+}
+
+define <vscale x 1 x double> @vfwadd_wf_nxv1f64(<vscale x 1 x double> %va, float %b) {
+; CHECK-LABEL: vfwadd_wf_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = fpext <vscale x 1 x float> %splat to <vscale x 1 x double>
+ %vd = fadd <vscale x 1 x double> %va, %vc
+ ret <vscale x 1 x double> %vd
+}
+
+define <vscale x 2 x double> @vfwadd_vv_nxv2f64(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfwadd_vv_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwadd.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 2 x float> %va to <vscale x 2 x double>
+ %vd = fpext <vscale x 2 x float> %vb to <vscale x 2 x double>
+ %ve = fadd <vscale x 2 x double> %vc, %vd
+ ret <vscale x 2 x double> %ve
+}
+
+define <vscale x 2 x double> @vfwadd_vf_nxv2f64(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfwadd_vf_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwadd.vf v10, v8, fa0
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = fpext <vscale x 2 x float> %va to <vscale x 2 x double>
+ %vd = fpext <vscale x 2 x float> %splat to <vscale x 2 x double>
+ %ve = fadd <vscale x 2 x double> %vc, %vd
+ ret <vscale x 2 x double> %ve
+}
+
+define <vscale x 2 x double> @vfwadd_wv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfwadd_wv_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwadd.wv v12, v8, v10
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 2 x float> %vb to <vscale x 2 x double>
+ %vd = fadd <vscale x 2 x double> %va, %vc
+ ret <vscale x 2 x double> %vd
+}
+
+define <vscale x 2 x double> @vfwadd_wf_nxv2f64(<vscale x 2 x double> %va, float %b) {
+; CHECK-LABEL: vfwadd_wf_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = fpext <vscale x 2 x float> %splat to <vscale x 2 x double>
+ %vd = fadd <vscale x 2 x double> %va, %vc
+ ret <vscale x 2 x double> %vd
+}
+
+define <vscale x 4 x double> @vfwadd_vv_nxv4f64(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfwadd_vv_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwadd.vv v12, v8, v10
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 4 x float> %va to <vscale x 4 x double>
+ %vd = fpext <vscale x 4 x float> %vb to <vscale x 4 x double>
+ %ve = fadd <vscale x 4 x double> %vc, %vd
+ ret <vscale x 4 x double> %ve
+}
+
+define <vscale x 4 x double> @vfwadd_vf_nxv4f64(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfwadd_vf_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwadd.vf v12, v8, fa0
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = fpext <vscale x 4 x float> %va to <vscale x 4 x double>
+ %vd = fpext <vscale x 4 x float> %splat to <vscale x 4 x double>
+ %ve = fadd <vscale x 4 x double> %vc, %vd
+ ret <vscale x 4 x double> %ve
+}
+
+define <vscale x 4 x double> @vfwadd_wv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfwadd_wv_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwadd.wv v16, v8, v12
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 4 x float> %vb to <vscale x 4 x double>
+ %vd = fadd <vscale x 4 x double> %va, %vc
+ ret <vscale x 4 x double> %vd
+}
+
+define <vscale x 4 x double> @vfwadd_wf_nxv4f64(<vscale x 4 x double> %va, float %b) {
+; CHECK-LABEL: vfwadd_wf_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = fpext <vscale x 4 x float> %splat to <vscale x 4 x double>
+ %vd = fadd <vscale x 4 x double> %va, %vc
+ ret <vscale x 4 x double> %vd
+}
+
+define <vscale x 8 x double> @vfwadd_vv_nxv8f64(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfwadd_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwadd.vv v16, v8, v12
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 8 x float> %va to <vscale x 8 x double>
+ %vd = fpext <vscale x 8 x float> %vb to <vscale x 8 x double>
+ %ve = fadd <vscale x 8 x double> %vc, %vd
+ ret <vscale x 8 x double> %ve
+}
+
+define <vscale x 8 x double> @vfwadd_vf_nxv8f64(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfwadd_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwadd.vf v16, v8, fa0
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = fpext <vscale x 8 x float> %va to <vscale x 8 x double>
+ %vd = fpext <vscale x 8 x float> %splat to <vscale x 8 x double>
+ %ve = fadd <vscale x 8 x double> %vc, %vd
+ ret <vscale x 8 x double> %ve
+}
+
+define <vscale x 8 x double> @vfwadd_wv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfwadd_wv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwadd.wv v24, v8, v16
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 8 x float> %vb to <vscale x 8 x double>
+ %vd = fadd <vscale x 8 x double> %va, %vc
+ ret <vscale x 8 x double> %vd
+}
+
+define <vscale x 8 x double> @vfwadd_wf_nxv8f64(<vscale x 8 x double> %va, float %b) {
+; CHECK-LABEL: vfwadd_wf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwadd.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = fpext <vscale x 8 x float> %splat to <vscale x 8 x double>
+ %vd = fadd <vscale x 8 x double> %va, %vc
+ ret <vscale x 8 x double> %vd
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll
new file mode 100644
index 000000000000..8e2ef3f76256
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-sdnode.ll
@@ -0,0 +1,217 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x double> @vfwsub_vv_nxv1f64(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfwsub_vv_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwsub.vv v10, v8, v9
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 1 x float> %va to <vscale x 1 x double>
+ %vd = fpext <vscale x 1 x float> %vb to <vscale x 1 x double>
+ %ve = fsub <vscale x 1 x double> %vc, %vd
+ ret <vscale x 1 x double> %ve
+}
+
+define <vscale x 1 x double> @vfwsub_vf_nxv1f64(<vscale x 1 x float> %va, float %b) {
+; CHECK-LABEL: vfwsub_vf_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwsub.vf v9, v8, fa0
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = fpext <vscale x 1 x float> %va to <vscale x 1 x double>
+ %vd = fpext <vscale x 1 x float> %splat to <vscale x 1 x double>
+ %ve = fsub <vscale x 1 x double> %vc, %vd
+ ret <vscale x 1 x double> %ve
+}
+
+define <vscale x 1 x double> @vfwsub_wv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x float> %vb) {
+; CHECK-LABEL: vfwsub_wv_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wv v10, v8, v9
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 1 x float> %vb to <vscale x 1 x double>
+ %vd = fsub <vscale x 1 x double> %va, %vc
+ ret <vscale x 1 x double> %vd
+}
+
+define <vscale x 1 x double> @vfwsub_wf_nxv1f64(<vscale x 1 x double> %va, float %b) {
+; CHECK-LABEL: vfwsub_wf_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+ %vc = fpext <vscale x 1 x float> %splat to <vscale x 1 x double>
+ %vd = fsub <vscale x 1 x double> %va, %vc
+ ret <vscale x 1 x double> %vd
+}
+
+define <vscale x 2 x double> @vfwsub_vv_nxv2f64(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfwsub_vv_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwsub.vv v10, v8, v9
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 2 x float> %va to <vscale x 2 x double>
+ %vd = fpext <vscale x 2 x float> %vb to <vscale x 2 x double>
+ %ve = fsub <vscale x 2 x double> %vc, %vd
+ ret <vscale x 2 x double> %ve
+}
+
+define <vscale x 2 x double> @vfwsub_vf_nxv2f64(<vscale x 2 x float> %va, float %b) {
+; CHECK-LABEL: vfwsub_vf_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwsub.vf v10, v8, fa0
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = fpext <vscale x 2 x float> %va to <vscale x 2 x double>
+ %vd = fpext <vscale x 2 x float> %splat to <vscale x 2 x double>
+ %ve = fsub <vscale x 2 x double> %vc, %vd
+ ret <vscale x 2 x double> %ve
+}
+
+define <vscale x 2 x double> @vfwsub_wv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x float> %vb) {
+; CHECK-LABEL: vfwsub_wv_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwsub.wv v12, v8, v10
+; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 2 x float> %vb to <vscale x 2 x double>
+ %vd = fsub <vscale x 2 x double> %va, %vc
+ ret <vscale x 2 x double> %vd
+}
+
+define <vscale x 2 x double> @vfwsub_wf_nxv2f64(<vscale x 2 x double> %va, float %b) {
+; CHECK-LABEL: vfwsub_wf_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %vc = fpext <vscale x 2 x float> %splat to <vscale x 2 x double>
+ %vd = fsub <vscale x 2 x double> %va, %vc
+ ret <vscale x 2 x double> %vd
+}
+
+define <vscale x 4 x double> @vfwsub_vv_nxv4f64(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfwsub_vv_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwsub.vv v12, v8, v10
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 4 x float> %va to <vscale x 4 x double>
+ %vd = fpext <vscale x 4 x float> %vb to <vscale x 4 x double>
+ %ve = fsub <vscale x 4 x double> %vc, %vd
+ ret <vscale x 4 x double> %ve
+}
+
+define <vscale x 4 x double> @vfwsub_vf_nxv4f64(<vscale x 4 x float> %va, float %b) {
+; CHECK-LABEL: vfwsub_vf_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwsub.vf v12, v8, fa0
+; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = fpext <vscale x 4 x float> %va to <vscale x 4 x double>
+ %vd = fpext <vscale x 4 x float> %splat to <vscale x 4 x double>
+ %ve = fsub <vscale x 4 x double> %vc, %vd
+ ret <vscale x 4 x double> %ve
+}
+
+define <vscale x 4 x double> @vfwsub_wv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x float> %vb) {
+; CHECK-LABEL: vfwsub_wv_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwsub.wv v16, v8, v12
+; CHECK-NEXT: vmv4r.v v8, v16
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 4 x float> %vb to <vscale x 4 x double>
+ %vd = fsub <vscale x 4 x double> %va, %vc
+ ret <vscale x 4 x double> %vd
+}
+
+define <vscale x 4 x double> @vfwsub_wf_nxv4f64(<vscale x 4 x double> %va, float %b) {
+; CHECK-LABEL: vfwsub_wf_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %vc = fpext <vscale x 4 x float> %splat to <vscale x 4 x double>
+ %vd = fsub <vscale x 4 x double> %va, %vc
+ ret <vscale x 4 x double> %vd
+}
+
+define <vscale x 8 x double> @vfwsub_vv_nxv8f64(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfwsub_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwsub.vv v16, v8, v12
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 8 x float> %va to <vscale x 8 x double>
+ %vd = fpext <vscale x 8 x float> %vb to <vscale x 8 x double>
+ %ve = fsub <vscale x 8 x double> %vc, %vd
+ ret <vscale x 8 x double> %ve
+}
+
+define <vscale x 8 x double> @vfwsub_vf_nxv8f64(<vscale x 8 x float> %va, float %b) {
+; CHECK-LABEL: vfwsub_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwsub.vf v16, v8, fa0
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = fpext <vscale x 8 x float> %va to <vscale x 8 x double>
+ %vd = fpext <vscale x 8 x float> %splat to <vscale x 8 x double>
+ %ve = fsub <vscale x 8 x double> %vc, %vd
+ ret <vscale x 8 x double> %ve
+}
+
+define <vscale x 8 x double> @vfwsub_wv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x float> %vb) {
+; CHECK-LABEL: vfwsub_wv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwsub.wv v24, v8, v16
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+ %vc = fpext <vscale x 8 x float> %vb to <vscale x 8 x double>
+ %vd = fsub <vscale x 8 x double> %va, %vc
+ ret <vscale x 8 x double> %vd
+}
+
+define <vscale x 8 x double> @vfwsub_wf_nxv8f64(<vscale x 8 x double> %va, float %b) {
+; CHECK-LABEL: vfwsub_wf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfwsub.wf v8, v8, fa0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+ %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = fpext <vscale x 8 x float> %splat to <vscale x 8 x double>
+ %vd = fsub <vscale x 8 x double> %va, %vc
+ ret <vscale x 8 x double> %vd
+}