[llvm] 5eb7191 - [RISCV] Add VP patterns for vandn.[vv,vx]

Luke Lau via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jul 18 05:53:25 PDT 2023


Author: Luke Lau
Date: 2023-07-18T13:53:17+01:00
New Revision: 5eb7191421515d8fc7aa2c73afa50bfa67cb64a3

URL: https://github.com/llvm/llvm-project/commit/5eb7191421515d8fc7aa2c73afa50bfa67cb64a3
DIFF: https://github.com/llvm/llvm-project/commit/5eb7191421515d8fc7aa2c73afa50bfa67cb64a3.diff

LOG: [RISCV] Add VP patterns for vandn.[vv,vx]

This builds upon D155433.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155434

Added: 
    llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td

Removed: 
    


################################################################################
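For reference, a minimal sketch of the vv fold these patterns enable, mirroring the autogenerated tests added below (the function name and the nxv1i8 type are illustrative, not part of the patch): a vp.xor against an all-ones splat, i.e. a masked vector not, feeding a vp.and with the same mask and EVL is selected as a single masked vandn.vv when Zvbb is enabled.

  ; Sketch only; compile with: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb
  declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
  declare <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

  define <vscale x 1 x i8> @andn_sketch(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
    ; Masked not: vp.xor of %a with an all-ones splat.
    %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> shufflevector(<vscale x 1 x i8> insertelement(<vscale x 1 x i8> poison, i8 -1, i32 0), <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %m, i32 %evl)
    ; Masked and with %b: with the new VV pattern this becomes one vandn.vv.
    %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %not.a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
    ret <vscale x 1 x i8> %x
  }
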
diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 35242c605d5f5b..41c7be4fcbe543 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -274,6 +274,45 @@ multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name> {
   }
 }
 
+foreach vti = AllIntegerVectors in {
+  let Predicates = !listconcat([HasStdExtZvbb],
+                               GetVTypePredicates<vti>.Predicates) in {
+    def : Pat<(vti.Vector (riscv_and_vl (riscv_xor_vl
+                                           (vti.Vector vti.RegClass:$rs1),
+                                           (riscv_splat_vector -1),
+                                           (vti.Vector vti.RegClass:$merge),
+                                           (vti.Mask V0),
+                                           VLOpFrag),
+                                        (vti.Vector vti.RegClass:$rs2),
+                                        (vti.Vector vti.RegClass:$merge),
+                                        (vti.Mask V0),
+                                        VLOpFrag)),
+              (!cast<Instruction>("PseudoVANDN_VV_"#vti.LMul.MX#"_MASK")
+                 vti.RegClass:$merge,
+                 vti.RegClass:$rs2,
+                 vti.RegClass:$rs1,
+                 (vti.Mask V0),
+                 GPR:$vl,
+                 vti.Log2SEW,
+                 TAIL_AGNOSTIC)>;
+
+    def : Pat<(vti.Vector (riscv_and_vl (riscv_splat_vector
+                                           (not vti.ScalarRegClass:$rs1)),
+                                        (vti.Vector vti.RegClass:$rs2),
+                                        (vti.Vector vti.RegClass:$merge),
+                                        (vti.Mask V0),
+                                        VLOpFrag)),
+              (!cast<Instruction>("PseudoVANDN_VX_"#vti.LMul.MX#"_MASK")
+                 vti.RegClass:$merge,
+                 vti.RegClass:$rs2,
+                 vti.ScalarRegClass:$rs1,
+                 (vti.Mask V0),
+                 GPR:$vl,
+                 vti.Log2SEW,
+                 TAIL_AGNOSTIC)>;
+  }
+}
+
 defm : VPatUnaryVL_V<riscv_bitreverse_vl, "PseudoVBREV">;
 defm : VPatUnaryVL_V<riscv_bswap_vl, "PseudoVREV8">;
 defm : VPatUnaryVL_V<riscv_ctlz_vl, "PseudoVCLZ">;
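For the vx form, a corresponding sketch (again mirroring the tests below; names are illustrative): inverting a scalar, splatting it, and feeding the splat into a vp.and is selected as a single masked vandn.vx. Per the Zvbb spec, vandn.vx computes vd[i] = vs2[i] & ~rs1, which is why the VX pattern above matches a splat of the inverted scalar.

  ; Sketch only; compile with: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb
  declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

  define <vscale x 1 x i8> @andn_vx_sketch(i8 %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
    ; Invert the scalar, then splat it across the vector.
    %not.a = xor i8 %a, -1
    %head = insertelement <vscale x 1 x i8> poison, i8 %not.a, i32 0
    %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
    ; Masked and with the splat: with the new VX pattern this becomes one vandn.vx.
    %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %splat, <vscale x 1 x i1> %m, i32 %evl)
    ret <vscale x 1 x i8> %x
  }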

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
new file mode 100644
index 00000000000000..d0e23f236b2f7a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
@@ -0,0 +1,1432 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+
+declare <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vandn_vv_vp_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> shufflevector(<vscale x 1 x i8> insertelement(<vscale x 1 x i8> poison, i8 -1, i32 0), <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %not.a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vandn_vv_vp_swapped_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> shufflevector(<vscale x 1 x i8> insertelement(<vscale x 1 x i8> poison, i8 -1, i32 0), <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i8> %x
+}
+
+define <vscale x 1 x i8> @vandn_vx_vp_nxv1i8(i8 %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv1i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i8> %head.not.a, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i8> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i8> %x
+}
+
+declare <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vandn_vv_vp_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> shufflevector(<vscale x 2 x i8> insertelement(<vscale x 2 x i8> poison, i8 -1, i32 0), <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %not.a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vandn_vv_vp_swapped_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> shufflevector(<vscale x 2 x i8> insertelement(<vscale x 2 x i8> poison, i8 -1, i32 0), <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i8> %x
+}
+
+define <vscale x 2 x i8> @vandn_vx_vp_nxv2i8(i8 %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv2i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i8> %head.not.a, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %b, <vscale x 2 x i8> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i8> %x
+}
+
+declare <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vandn_vv_vp_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> shufflevector(<vscale x 4 x i8> insertelement(<vscale x 4 x i8> poison, i8 -1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %not.a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vandn_vv_vp_swapped_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> shufflevector(<vscale x 4 x i8> insertelement(<vscale x 4 x i8> poison, i8 -1, i32 0), <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i8> %x
+}
+
+define <vscale x 4 x i8> @vandn_vx_vp_nxv4i8(i8 %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv4i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i8> %head.not.a, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %b, <vscale x 4 x i8> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i8> %x
+}
+
+declare <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vandn_vv_vp_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> shufflevector(<vscale x 8 x i8> insertelement(<vscale x 8 x i8> poison, i8 -1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %not.a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vandn_vv_vp_swapped_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> shufflevector(<vscale x 8 x i8> insertelement(<vscale x 8 x i8> poison, i8 -1, i32 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i8> %x
+}
+
+define <vscale x 8 x i8> @vandn_vx_vp_nxv8i8(i8 %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv8i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i8> %head.not.a, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %b, <vscale x 8 x i8> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i8> %x
+}
+
+declare <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+declare <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vandn_vv_vp_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> shufflevector(<vscale x 16 x i8> insertelement(<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %not.a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vandn_vv_vp_swapped_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> shufflevector(<vscale x 16 x i8> insertelement(<vscale x 16 x i8> poison, i8 -1, i32 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i8> %x
+}
+
+define <vscale x 16 x i8> @vandn_vx_vp_nxv16i8(i8 %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv16i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 16 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 16 x i8> %head.not.a, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %b, <vscale x 16 x i8> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i8> %x
+}
+
+declare <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+declare <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vandn_vv_vp_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> shufflevector(<vscale x 32 x i8> insertelement(<vscale x 32 x i8> poison, i8 -1, i32 0), <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %not.a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vandn_vv_vp_swapped_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> shufflevector(<vscale x 32 x i8> insertelement(<vscale x 32 x i8> poison, i8 -1, i32 0), <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i8> %x
+}
+
+define <vscale x 32 x i8> @vandn_vx_vp_nxv32i8(i8 %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv32i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 32 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 32 x i8> %head.not.a, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %b, <vscale x 32 x i8> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i8> %x
+}
+
+declare <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+declare <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vandn_vv_vp_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> shufflevector(<vscale x 64 x i8> insertelement(<vscale x 64 x i8> poison, i8 -1, i32 0), <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer), <vscale x 64 x i1> %mask, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %not.a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 %evl)
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vandn_vv_vp_swapped_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> shufflevector(<vscale x 64 x i8> insertelement(<vscale x 64 x i8> poison, i8 -1, i32 0), <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer), <vscale x 64 x i1> %mask, i32 %evl)
+  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %not.a, <vscale x 64 x i1> %mask, i32 %evl)
+  ret <vscale x 64 x i8> %x
+}
+
+define <vscale x 64 x i8> @vandn_vx_vp_nxv64i8(i8 %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv64i8:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i8 %a, -1
+  %head.not.a = insertelement <vscale x 64 x i8> poison, i8 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 64 x i8> %head.not.a, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+  %x = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %b, <vscale x 64 x i8> %splat.not.a, <vscale x 64 x i1> %mask, i32 %evl)
+  ret <vscale x 64 x i8> %x
+}
+
+declare <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vandn_vv_vp_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> shufflevector(<vscale x 1 x i16> insertelement(<vscale x 1 x i16> poison, i16 -1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %not.a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vandn_vv_vp_swapped_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> shufflevector(<vscale x 1 x i16> insertelement(<vscale x 1 x i16> poison, i16 -1, i32 0), <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i16> %x
+}
+
+define <vscale x 1 x i16> @vandn_vx_vp_nxv1i16(i16 %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv1i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i16> %head.not.a, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %b, <vscale x 1 x i16> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i16> %x
+}
+
+declare <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vandn_vv_vp_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 -1, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %not.a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vandn_vv_vp_swapped_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> shufflevector(<vscale x 2 x i16> insertelement(<vscale x 2 x i16> poison, i16 -1, i32 0), <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i16> %x
+}
+
+define <vscale x 2 x i16> @vandn_vx_vp_nxv2i16(i16 %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv2i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i16> %head.not.a, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %b, <vscale x 2 x i16> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i16> %x
+}
+
+declare <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vandn_vv_vp_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> shufflevector(<vscale x 4 x i16> insertelement(<vscale x 4 x i16> poison, i16 -1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %not.a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vandn_vv_vp_swapped_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> shufflevector(<vscale x 4 x i16> insertelement(<vscale x 4 x i16> poison, i16 -1, i32 0), <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i16> %x
+}
+
+define <vscale x 4 x i16> @vandn_vx_vp_nxv4i16(i16 %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv4i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i16> %head.not.a, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %b, <vscale x 4 x i16> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i16> %x
+}
+
+declare <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vandn_vv_vp_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> shufflevector(<vscale x 8 x i16> insertelement(<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %not.a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vandn_vv_vp_swapped_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> shufflevector(<vscale x 8 x i16> insertelement(<vscale x 8 x i16> poison, i16 -1, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i16> %x
+}
+
+define <vscale x 8 x i16> @vandn_vx_vp_nxv8i16(i16 %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i16> %head.not.a, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %b, <vscale x 8 x i16> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i16> %x
+}
+
+declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+declare <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vandn_vv_vp_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> shufflevector(<vscale x 16 x i16> insertelement(<vscale x 16 x i16> poison, i16 -1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %not.a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vandn_vv_vp_swapped_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> shufflevector(<vscale x 16 x i16> insertelement(<vscale x 16 x i16> poison, i16 -1, i32 0), <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i16> %x
+}
+
+define <vscale x 16 x i16> @vandn_vx_vp_nxv16i16(i16 %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv16i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 16 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 16 x i16> %head.not.a, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %b, <vscale x 16 x i16> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i16> %x
+}
+
+declare <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+declare <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vandn_vv_vp_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> shufflevector(<vscale x 32 x i16> insertelement(<vscale x 32 x i16> poison, i16 -1, i32 0), <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %not.a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vandn_vv_vp_swapped_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> shufflevector(<vscale x 32 x i16> insertelement(<vscale x 32 x i16> poison, i16 -1, i32 0), <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer), <vscale x 32 x i1> %mask, i32 %evl)
+  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i16> %x
+}
+
+define <vscale x 32 x i16> @vandn_vx_vp_nxv32i16(i16 %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv32i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i16 %a, -1
+  %head.not.a = insertelement <vscale x 32 x i16> poison, i16 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 32 x i16> %head.not.a, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+  %x = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %b, <vscale x 32 x i16> %splat.not.a, <vscale x 32 x i1> %mask, i32 %evl)
+  ret <vscale x 32 x i16> %x
+}
+
+declare <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vandn_vv_vp_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> shufflevector(<vscale x 1 x i32> insertelement(<vscale x 1 x i32> poison, i32 -1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %not.a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i32> %x
+}
+
+define <vscale x 1 x i32> @vandn_vv_vp_swapped_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> shufflevector(<vscale x 1 x i32> insertelement(<vscale x 1 x i32> poison, i32 -1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i32> %x
+}
+
+define <vscale x 1 x i32> @vandn_vx_vp_nxv1i32(i32 %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv1i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i32> %head.not.a, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %b, <vscale x 1 x i32> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i32> %x
+}
+
+declare <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vandn_vv_vp_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> shufflevector(<vscale x 2 x i32> insertelement(<vscale x 2 x i32> poison, i32 -1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %not.a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vandn_vv_vp_swapped_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> shufflevector(<vscale x 2 x i32> insertelement(<vscale x 2 x i32> poison, i32 -1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i32> %x
+}
+
+define <vscale x 2 x i32> @vandn_vx_vp_nxv2i32(i32 %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv2i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i32> %head.not.a, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %b, <vscale x 2 x i32> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i32> %x
+}
+
+declare <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vandn_vv_vp_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> shufflevector(<vscale x 4 x i32> insertelement(<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %not.a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vandn_vv_vp_swapped_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> shufflevector(<vscale x 4 x i32> insertelement(<vscale x 4 x i32> poison, i32 -1, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i32> %x
+}
+
+define <vscale x 4 x i32> @vandn_vx_vp_nxv4i32(i32 %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i32> %head.not.a, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %b, <vscale x 4 x i32> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i32> %x
+}
+
+declare <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vandn_vv_vp_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> shufflevector(<vscale x 8 x i32> insertelement(<vscale x 8 x i32> poison, i32 -1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %not.a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vandn_vv_vp_swapped_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> shufflevector(<vscale x 8 x i32> insertelement(<vscale x 8 x i32> poison, i32 -1, i32 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i32> %x
+}
+
+define <vscale x 8 x i32> @vandn_vx_vp_nxv8i32(i32 %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv8i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i32> %head.not.a, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %b, <vscale x 8 x i32> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i32> %x
+}
+
+declare <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+declare <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vandn_vv_vp_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> shufflevector(<vscale x 16 x i32> insertelement(<vscale x 16 x i32> poison, i32 -1, i32 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %not.a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vandn_vv_vp_swapped_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> shufflevector(<vscale x 16 x i32> insertelement(<vscale x 16 x i32> poison, i32 -1, i32 0), <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> %mask, i32 %evl)
+  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i32> %x
+}
+
+define <vscale x 16 x i32> @vandn_vx_vp_nxv16i32(i32 %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vx_vp_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    not a0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vx_vp_nxv16i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = xor i32 %a, -1
+  %head.not.a = insertelement <vscale x 16 x i32> poison, i32 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 16 x i32> %head.not.a, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+  %x = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %b, <vscale x 16 x i32> %splat.not.a, <vscale x 16 x i1> %mask, i32 %evl)
+  ret <vscale x 16 x i32> %x
+}
+
+declare <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vandn_vv_vp_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 -1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %not.a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %x
+}
+
+define <vscale x 1 x i64> @vandn_vv_vp_swapped_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v9, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> shufflevector(<vscale x 1 x i64> insertelement(<vscale x 1 x i64> poison, i64 -1, i32 0), <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i1> %mask, i32 %evl)
+  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %x
+}
+
+define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v9, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv1i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 1 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 1 x i64> %head.not.a, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %x = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %b, <vscale x 1 x i64> %splat.not.a, <vscale x 1 x i1> %mask, i32 %evl)
+  ret <vscale x 1 x i64> %x
+}
+
+declare <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vandn_vv_vp_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 -1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %not.a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vandn_vv_vp_swapped_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v10, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 -1, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i1> %mask, i32 %evl)
+  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i64> %x
+}
+
+define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v10, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv2i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 2 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 2 x i64> %head.not.a, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %x = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %b, <vscale x 2 x i64> %splat.not.a, <vscale x 2 x i1> %mask, i32 %evl)
+  ret <vscale x 2 x i64> %x
+}
+
+declare <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vandn_vv_vp_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> shufflevector(<vscale x 4 x i64> insertelement(<vscale x 4 x i64> poison, i64 -1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %not.a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vandn_vv_vp_swapped_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v12, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v12, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> shufflevector(<vscale x 4 x i64> insertelement(<vscale x 4 x i64> poison, i64 -1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i1> %mask, i32 %evl)
+  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i64> %x
+}
+
+define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv4i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 4 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 4 x i64> %head.not.a, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  %x = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %b, <vscale x 4 x i64> %splat.not.a, <vscale x 4 x i1> %mask, i32 %evl)
+  ret <vscale x 4 x i64> %x
+}
+
+declare <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vandn_vv_vp_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> shufflevector(<vscale x 8 x i64> insertelement(<vscale x 8 x i64> poison, i64 -1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %not.a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vandn_vv_vp_swapped_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vandn_vv_vp_swapped_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    vand.vv v8, v16, v8, v0.t
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vandn_vv_vp_swapped_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vandn.vv v8, v16, v8, v0.t
+; CHECK-ZVBB-NEXT:    ret
+  %not.a = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> shufflevector(<vscale x 8 x i64> insertelement(<vscale x 8 x i64> poison, i64 -1, i32 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i1> %mask, i32 %evl)
+  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i64> %x
+}
+
+define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-RV32-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    not a0, a0
+; CHECK-RV32-NEXT:    not a1, a1
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    not a0, a0
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    not a0, a0
+; CHECK-ZVBB32-NEXT:    not a1, a1
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-ZVBB32-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vandn_vx_vp_nxv8i64:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-ZVBB64-NEXT:    vandn.vx v8, v8, a0, v0.t
+; CHECK-ZVBB64-NEXT:    ret
+  %not.a = xor i64 %a, -1
+  %head.not.a = insertelement <vscale x 8 x i64> poison, i64 %not.a, i32 0
+  %splat.not.a = shufflevector <vscale x 8 x i64> %head.not.a, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %x = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %b, <vscale x 8 x i64> %splat.not.a, <vscale x 8 x i1> %mask, i32 %evl)
+  ret <vscale x 8 x i64> %x
+}
+

More information about the llvm-commits mailing list