[llvm] 766d27d - [RISCV] Add isel patterns to handle vrsub intrinsic with 2 vector operands.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 1 14:22:58 PDT 2021


Author: Craig Topper
Date: 2021-04-01T14:10:21-07:00
New Revision: 766d27dc857200cd1fbb3a30e62b84ce271fb84f

URL: https://github.com/llvm/llvm-project/commit/766d27dc857200cd1fbb3a30e62b84ce271fb84f
DIFF: https://github.com/llvm/llvm-project/commit/766d27dc857200cd1fbb3a30e62b84ce271fb84f.diff

LOG: [RISCV] Add isel patterns to handle vrsub intrinsic with 2 vector operands.

This occurs when we type legalize an i64 scalar input on RV32. We
need to manually splat the scalar, which leaves the intrinsic with
two vector operands. Rather than special-casing this in lowering,
just pattern match it.
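
For illustration, a minimal sketch of the situation the new patterns
handle, modeled on the nxv1i64 test added below (the function and value
names here are illustrative only). On RV32 an i64 scalar operand is not
legal, so type legalization splats it into a vector and the intrinsic
reaches instruction selection with two vector operands:

    declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
      <vscale x 1 x i64>,
      i64,
      i32)

    define <vscale x 1 x i64> @example_vrsub_i64_rv32(<vscale x 1 x i64> %v, i64 %x, i32 %vl) {
    entry:
      ; After type legalization on RV32, %x is materialized as a vector
      ; splat, so isel sees vrsub with two vector operands. The new
      ; patterns match this and emit vsub.vv with the operands swapped.
      %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
        <vscale x 1 x i64> %v,
        i64 %x,
        i32 %vl)
      ret <vscale x 1 x i64> %a
    }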

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 7ac25091cde9..37bb46ed7da5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3279,6 +3279,30 @@ defm PseudoVSUB        : VPseudoBinaryV_VV_VX;
 defm PseudoVRSUB       : VPseudoBinaryV_VX_VI;
 
 foreach vti = AllIntegerVectors in {
+  // Match vrsub with 2 vector operands to vsub.vv by swapping operands. This
+  // occurs when legalizing vrsub.vx intrinsics for i64 on RV32 since we need
+  // to use a more complex splat sequence. Add the pattern for all VTs for
+  // consistency.
+  def : Pat<(vti.Vector (int_riscv_vrsub (vti.Vector vti.RegClass:$rs2),
+                                         (vti.Vector vti.RegClass:$rs1),
+                                         (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                              vti.RegClass:$rs2,
+                                                              GPR:$vl,
+                                                              vti.SEW)>;
+  def : Pat<(vti.Vector (int_riscv_vrsub_mask (vti.Vector vti.RegClass:$merge),
+                                              (vti.Vector vti.RegClass:$rs2),
+                                              (vti.Vector vti.RegClass:$rs1),
+                                              (vti.Mask V0),
+                                              (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVSUB_VV_"#vti.LMul.MX#"_MASK")
+                                                      vti.RegClass:$merge,
+                                                      vti.RegClass:$rs1,
+                                                      vti.RegClass:$rs2,
+                                                      (vti.Mask V0),
+                                                      GPR:$vl,
+                                                      vti.SEW)>;
+
   // Match VSUB with a small immediate to vadd.vi by negating the immediate.
   def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
                                         (vti.Scalar simm5_plus1:$rs2),

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index 79cefc6be1d3..66872327ce39 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -793,6 +793,252 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
+declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i32);
+
+define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v25, v25, a1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
+; CHECK-NEXT:    vor.vv v25, v26, v25
+; CHECK-NEXT:    vsub.vv v8, v25, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v25, v25, a1
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vsrl.vx v26, v26, a1
+; CHECK-NEXT:    vor.vv v25, v26, v25
+; CHECK-NEXT:    vsetvli a0, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vsub.vv v8, v25, v9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i32);
+
+define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vsrl.vx v28, v28, a1
+; CHECK-NEXT:    vor.vv v26, v28, v26
+; CHECK-NEXT:    vsub.vv v8, v26, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v26, v26, a1
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vsrl.vx v28, v28, a1
+; CHECK-NEXT:    vor.vv v26, v28, v26
+; CHECK-NEXT:    vsetvli a0, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vsub.vv v8, v26, v10, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i32);
+
+define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsll.vx v12, v12, a1
+; CHECK-NEXT:    vsrl.vx v12, v12, a1
+; CHECK-NEXT:    vor.vv v28, v12, v28
+; CHECK-NEXT:    vsub.vv v8, v28, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v28, v28, a1
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsll.vx v16, v16, a1
+; CHECK-NEXT:    vsrl.vx v16, v16, a1
+; CHECK-NEXT:    vor.vv v28, v16, v28
+; CHECK-NEXT:    vsetvli a0, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vsub.vv v8, v28, v12, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i32);
+
+define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v16, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v16, v16, a1
+; CHECK-NEXT:    vmv.v.x v24, a0
+; CHECK-NEXT:    vsll.vx v24, v24, a1
+; CHECK-NEXT:    vsrl.vx v24, v24, a1
+; CHECK-NEXT:    vor.vv v16, v24, v16
+; CHECK-NEXT:    vsub.vv v8, v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    csrrs a3, vlenb, zero
+; CHECK-NEXT:    sub sp, sp, a3
+; CHECK-NEXT:    addi a3, sp, 16
+; CHECK-NEXT:    vs1r.v v0, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v24, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v0, v24, a1
+; CHECK-NEXT:    vmv.v.x v24, a0
+; CHECK-NEXT:    vsll.vx v24, v24, a1
+; CHECK-NEXT:    vsrl.vx v24, v24, a1
+; CHECK-NEXT:    vor.vv v24, v24, v0
+; CHECK-NEXT:    vsetvli a0, a2, e64,m8,tu,mu
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl1re8.v v0, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vsub.vv v8, v24, v16, v0.t
+; CHECK-NEXT:    csrrs a0, vlenb, zero
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
 define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -1368,3 +1614,131 @@ entry:
 
   ret <vscale x 16 x i32> %a
 }
+
+define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vrsub.vi v8, v8, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    i32 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vrsub.vi v8, v8, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    i32 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vrsub.vi v8, v8, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    i32 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vrsub.vi v8, v8, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i32 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
+; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}


        

