[llvm-branch-commits] [llvm] c9154e8 - [RISCV] Add vector mask arithmetic ISel patterns

Fraser Cormack via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Thu Jan 7 01:54:14 PST 2021


Author: Fraser Cormack
Date: 2021-01-07T09:43:25Z
New Revision: c9154e8fa377d1621e20482dda8a8bb2439a39c4

URL: https://github.com/llvm/llvm-project/commit/c9154e8fa377d1621e20482dda8a8bb2439a39c4
DIFF: https://github.com/llvm/llvm-project/commit/c9154e8fa377d1621e20482dda8a8bb2439a39c4.diff

LOG: [RISCV] Add vector mask arithmetic ISel patterns

The patterns that want to use 'vnot' instead use a custom PatFrag,
riscv_m_vnot. This is because the standard 'vnot' fragment is defined in
terms of immAllOnesV, which implicitly matches BUILD_VECTOR rather than
the SPLAT_VECTOR node used for scalable-vector splats.
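
For reference, this is the IR idiom the new fragment ends up matching (a
minimal sketch mirroring the tests added below; the function name
@mask_not is illustrative). A mask "not" is an xor with an all-true
splat, and for scalable vectors that splat legalizes to SPLAT_VECTOR:

    define <vscale x 1 x i1> @mask_not(<vscale x 1 x i1> %m) {
      ; Build an all-true mask splat; for scalable vectors this becomes
      ; SPLAT_VECTOR, which immAllOnesV (and hence 'vnot') cannot match.
      %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
      %ones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
      ; xor with the all-true splat is the mask "not" that riscv_m_vnot matches.
      %not = xor <vscale x 1 x i1> %m, %ones
      ret <vscale x 1 x i1> %not
    }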

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D94078

Added: 
    llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index fa2236cef874..208e50168897 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -35,6 +35,11 @@ def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
 def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", []>;
 def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", []>;
 
+// A mask-vector version of the standard 'vnot' fragment but using splat_vector
+// rather than (the implicit) build_vector
+def riscv_m_vnot : PatFrag<(ops node:$in),
+                           (xor node:$in, (splat_vector (XLenVT 1)))>;
+
 multiclass VPatUSLoadStoreSDNode<LLVMType type,
                                  LLVMType mask_type,
                                  int sew,
@@ -181,6 +186,36 @@ defm "" : VPatBinarySDNode_VV_VX<udiv, "PseudoVDIV">;
 defm "" : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU">;
 defm "" : VPatBinarySDNode_VV_VX<srem, "PseudoVREM">;
 
+// 16.1. Vector Mask-Register Logical Instructions
+foreach mti = AllMasks in {
+  def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
+            (!cast<Instruction>("PseudoVMAND_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (or VR:$rs1, VR:$rs2)),
+            (!cast<Instruction>("PseudoVMOR_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (xor VR:$rs1, VR:$rs2)),
+            (!cast<Instruction>("PseudoVMXOR_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+
+  def : Pat<(mti.Mask (riscv_m_vnot (and VR:$rs1, VR:$rs2))),
+            (!cast<Instruction>("PseudoVMNAND_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (riscv_m_vnot (or VR:$rs1, VR:$rs2))),
+            (!cast<Instruction>("PseudoVMNOR_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (riscv_m_vnot (xor VR:$rs1, VR:$rs2))),
+            (!cast<Instruction>("PseudoVMXNOR_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+
+  def : Pat<(mti.Mask (and VR:$rs1, (riscv_m_vnot VR:$rs2))),
+            (!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (or VR:$rs1, (riscv_m_vnot VR:$rs2))),
+            (!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
+                 VR:$rs1, VR:$rs2, VLMax, mti.SEW)>;
+}
+
 } // Predicates = [HasStdExtV]
 
 //===----------------------------------------------------------------------===//
@@ -196,6 +231,13 @@ foreach vti = AllIntegerVectors in {
             (!cast<Instruction>("PseudoVMV_V_I_" # vti.LMul.MX)
               simm5:$rs1, VLMax, vti.SEW)>;
 }
+
+foreach mti = AllMasks in {
+  def : Pat<(mti.Mask (splat_vector (XLenVT 1))),
+            (!cast<Instruction>("PseudoVMSET_M_"#mti.BX) VLMax, mti.SEW)>;
+  def : Pat<(mti.Mask (splat_vector (XLenVT 0))),
+            (!cast<Instruction>("PseudoVMCLR_M_"#mti.BX) VLMax, mti.SEW)>;
+}
 } // Predicates = [HasStdExtV]
 
 let Predicates = [HasStdExtV, IsRV32] in {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
new file mode 100644
index 000000000000..cd75d6e50033
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmarith-sdnode.ll
@@ -0,0 +1,479 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i1> @vmand_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 1 x i1> %va, %vb
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmand_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 2 x i1> %va, %vb
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmand_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 4 x i1> %va, %vb
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmand_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 8 x i1> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmand_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmand_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 16 x i1> %va, %vb
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 1 x i1> %va, %vb
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 2 x i1> %va, %vb
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 4 x i1> %va, %vb
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 8 x i1> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 16 x i1> %va, %vb
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmxor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 1 x i1> %va, %vb
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmxor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 2 x i1> %va, %vb
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmxor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 4 x i1> %va, %vb
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmxor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 8 x i1> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmxor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmxor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 16 x i1> %va, %vb
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmnand_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 1 x i1> %va, %vb
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vc, %splat
+  ret <vscale x 1 x i1> %not
+}
+
+define <vscale x 2 x i1> @vmnand_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 2 x i1> %va, %vb
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vc, %splat
+  ret <vscale x 2 x i1> %not
+}
+
+define <vscale x 4 x i1> @vmnand_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 4 x i1> %va, %vb
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vc, %splat
+  ret <vscale x 4 x i1> %not
+}
+
+define <vscale x 8 x i1> @vmnand_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 8 x i1> %va, %vb
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vc, %splat
+  ret <vscale x 8 x i1> %not
+}
+
+define <vscale x 16 x i1> @vmnand_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmnand_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmnand.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = and <vscale x 16 x i1> %va, %vb
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vc, %splat
+  ret <vscale x 16 x i1> %not
+}
+
+define <vscale x 1 x i1> @vmnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 1 x i1> %va, %vb
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vc, %splat
+  ret <vscale x 1 x i1> %not
+}
+
+define <vscale x 2 x i1> @vmnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 2 x i1> %va, %vb
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vc, %splat
+  ret <vscale x 2 x i1> %not
+}
+
+define <vscale x 4 x i1> @vmnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 4 x i1> %va, %vb
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vc, %splat
+  ret <vscale x 4 x i1> %not
+}
+
+define <vscale x 8 x i1> @vmnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 8 x i1> %va, %vb
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vc, %splat
+  ret <vscale x 8 x i1> %not
+}
+
+define <vscale x 16 x i1> @vmnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmnor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = or <vscale x 16 x i1> %va, %vb
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vc, %splat
+  ret <vscale x 16 x i1> %not
+}
+
+define <vscale x 1 x i1> @vmxnor_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 1 x i1> %va, %vb
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vc, %splat
+  ret <vscale x 1 x i1> %not
+}
+
+define <vscale x 2 x i1> @vmxnor_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 2 x i1> %va, %vb
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vc, %splat
+  ret <vscale x 2 x i1> %not
+}
+
+define <vscale x 4 x i1> @vmxnor_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 4 x i1> %va, %vb
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vc, %splat
+  ret <vscale x 4 x i1> %not
+}
+
+define <vscale x 8 x i1> @vmxnor_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 8 x i1> %va, %vb
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vc, %splat
+  ret <vscale x 8 x i1> %not
+}
+
+define <vscale x 16 x i1> @vmxnor_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmxnor_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %vc = xor <vscale x 16 x i1> %va, %vb
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vc, %splat
+  ret <vscale x 16 x i1> %not
+}
+
+define <vscale x 1 x i1> @vmandnot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vb, %splat
+  %vc = and <vscale x 1 x i1> %va, %not
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmandnot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vb, %splat
+  %vc = and <vscale x 2 x i1> %va, %not
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmandnot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vb, %splat
+  %vc = and <vscale x 4 x i1> %va, %not
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmandnot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vb, %splat
+  %vc = and <vscale x 8 x i1> %va, %not
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmandnot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmandnot_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmandnot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vb, %splat
+  %vc = and <vscale x 16 x i1> %va, %not
+  ret <vscale x 16 x i1> %vc
+}
+
+define <vscale x 1 x i1> @vmornot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  %not = xor <vscale x 1 x i1> %vb, %splat
+  %vc = or <vscale x 1 x i1> %va, %not
+  ret <vscale x 1 x i1> %vc
+}
+
+define <vscale x 2 x i1> @vmornot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  %not = xor <vscale x 2 x i1> %vb, %splat
+  %vc = or <vscale x 2 x i1> %va, %not
+  ret <vscale x 2 x i1> %vc
+}
+
+define <vscale x 4 x i1> @vmornot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  %not = xor <vscale x 4 x i1> %vb, %splat
+  %vc = or <vscale x 4 x i1> %va, %not
+  ret <vscale x 4 x i1> %vc
+}
+
+define <vscale x 8 x i1> @vmornot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  %not = xor <vscale x 8 x i1> %vb, %splat
+  %vc = or <vscale x 8 x i1> %va, %not
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 16 x i1> @vmornot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmornot_vv_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmornot.mm v0, v0, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  %not = xor <vscale x 16 x i1> %vb, %splat
+  %vc = or <vscale x 16 x i1> %va, %not
+  ret <vscale x 16 x i1> %vc
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
new file mode 100644
index 000000000000..f6c9fa06c755
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-i1.ll
@@ -0,0 +1,113 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i1> @vsplat_nxv1i1_0() {
+; CHECK-LABEL: vsplat_nxv1i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i1> %splat
+}
+
+define <vscale x 1 x i1> @vsplat_nxv1i1_1() {
+; CHECK-LABEL: vsplat_nxv1i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+  ret <vscale x 1 x i1> %splat
+}
+
+define <vscale x 2 x i1> @vsplat_nxv2i1_0() {
+; CHECK-LABEL: vsplat_nxv2i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i1> %splat
+}
+
+define <vscale x 2 x i1> @vsplat_nxv2i1_1() {
+; CHECK-LABEL: vsplat_nxv2i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+  ret <vscale x 2 x i1> %splat
+}
+
+define <vscale x 4 x i1> @vsplat_nxv4i1_0() {
+; CHECK-LABEL: vsplat_nxv4i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i1> %splat
+}
+
+define <vscale x 4 x i1> @vsplat_nxv4i1_1() {
+; CHECK-LABEL: vsplat_nxv4i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+  ret <vscale x 4 x i1> %splat
+}
+
+define <vscale x 8 x i1> @vsplat_nxv8i1_0() {
+; CHECK-LABEL: vsplat_nxv8i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i1> %splat
+}
+
+define <vscale x 8 x i1> @vsplat_nxv8i1_1() {
+; CHECK-LABEL: vsplat_nxv8i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+  ret <vscale x 8 x i1> %splat
+}
+
+define <vscale x 16 x i1> @vsplat_nxv16i1_0() {
+; CHECK-LABEL: vsplat_nxv16i1_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 0, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i1> %splat
+}
+
+define <vscale x 16 x i1> @vsplat_nxv16i1_1() {
+; CHECK-LABEL: vsplat_nxv16i1_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 16 x i1> undef, i1 -1, i32 0
+  %splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+  ret <vscale x 16 x i1> %splat
+}


        

