[llvm] e6b1ac8 - [LegalizeTypes][VP] Add widening support for binary VP ops

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 19 05:18:23 PDT 2021


Author: Fraser Cormack
Date: 2021-08-19T13:08:47+01:00
New Revision: e6b1ac85461ac37526fcc3157a15880c4b289b97

URL: https://github.com/llvm/llvm-project/commit/e6b1ac85461ac37526fcc3157a15880c4b289b97
DIFF: https://github.com/llvm/llvm-project/commit/e6b1ac85461ac37526fcc3157a15880c4b289b97.diff

LOG: [LegalizeTypes][VP] Add widening support for binary VP ops

This patch adds the beginnings of more thorough support in the
legalizers for vector-predicated (VP) operations.

The first step is the ability to widen illegal vectors. The more
complicated scenario, in which the result/operands need widening but the
mask doesn't, is not handled here: it would require a lot of code
without an in-tree target on which to test it.
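
As an illustrative sketch (hand-written IR, not tool output; the widened
names %va.wide, %vb.wide and %m.wide are hypothetical), widening turns
an operation on an illegal type such as <5 x i8> into one on the next
legal type, widening the mask alongside it and passing the EVL through
unchanged, so the garbage in the extra lanes is never active:

  ; Before legalization: <5 x i8> is not a legal type.
  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb,
                                       <5 x i1> %m, i32 %evl)

  ; Conceptually after widening, assuming <8 x i8> is the legal width:
  %w = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va.wide, <8 x i8> %vb.wide,
                                       <8 x i1> %m.wide, i32 %evl)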

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D107904

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index bfd3927f8a16..508823441e9f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -915,7 +915,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N);
 
   SDValue WidenVecRes_Ternary(SDNode *N);
-  SDValue WidenVecRes_Binary(SDNode *N);
+  SDValue WidenVecRes_Binary(SDNode *N, bool IsVP);
   SDValue WidenVecRes_BinaryCanTrap(SDNode *N);
   SDValue WidenVecRes_BinaryWithExtraScalarOp(SDNode *N);
   SDValue WidenVecRes_StrictFP(SDNode *N);

diff  --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index d3ebf6ca89ce..2c6cef8ebf16 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -3070,7 +3070,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::USHLSAT:
   case ISD::ROTL:
   case ISD::ROTR:
-    Res = WidenVecRes_Binary(N);
+    Res = WidenVecRes_Binary(N, /*IsVP*/ false);
     break;
 
   case ISD::FADD:
@@ -3194,6 +3194,31 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::FSHR:
     Res = WidenVecRes_Ternary(N);
     break;
+  case ISD::VP_ADD:
+  case ISD::VP_AND:
+  case ISD::VP_MUL:
+  case ISD::VP_OR:
+  case ISD::VP_SUB:
+  case ISD::VP_XOR:
+  case ISD::VP_SHL:
+  case ISD::VP_LSHR:
+  case ISD::VP_ASHR:
+  case ISD::VP_SDIV:
+  case ISD::VP_UDIV:
+  case ISD::VP_SREM:
+  case ISD::VP_UREM:
+  case ISD::VP_FADD:
+  case ISD::VP_FSUB:
+  case ISD::VP_FMUL:
+  case ISD::VP_FDIV:
+  case ISD::VP_FREM:
+    // Vector-predicated binary op widening. Note that -- unlike the
+    // unpredicated versions -- we don't have to worry about trapping on
+    // operations like UDIV, FADD, etc., as we pass on the original vector
+    // length parameter. This means the widened elements containing garbage
+    // aren't active.
+    Res = WidenVecRes_Binary(N, /*IsVP*/ true);
+    break;
   }
 
   // If Res is null, the sub-method took care of registering the result.
@@ -3211,13 +3236,31 @@ SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) {
   return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
 }
 
-SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
+SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N, bool IsVP) {
   // Binary op widening.
   SDLoc dl(N);
   EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
   SDValue InOp1 = GetWidenedVector(N->getOperand(0));
   SDValue InOp2 = GetWidenedVector(N->getOperand(1));
-  return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, N->getFlags());
+  if (!IsVP)
+    return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2,
+                       N->getFlags());
+  // For VP operations, we must also widen the mask. Note that the mask type
+  // may not actually need widening, leading to it being split along with the
+  // VP operation.
+  // FIXME: This could lead to an infinite split/widen loop. We only handle the
+  // case where the mask needs widening to an identically-sized type as the
+  // vector inputs.
+  SDValue Mask = N->getOperand(2);
+  assert(getTypeAction(Mask.getValueType()) ==
+             TargetLowering::TypeWidenVector &&
+         "Unable to widen binary VP op");
+  Mask = GetWidenedVector(Mask);
+  assert(Mask.getValueType().getVectorElementCount() ==
+             WidenVT.getVectorElementCount() &&
+         "Unable to widen binary VP op");
+  return DAG.getNode(N->getOpcode(), dl, WidenVT,
+                     {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags());
 }
 
 SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) {
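
As a note on the FIXME above: the assertions encode the single case this
patch handles, namely that the mask's type action is also widening, to
the same element count as the widened result. A sketch of the case left
unhandled (hand-written IR for a hypothetical target; no in-tree target
behaves this way): if <5 x i1> were already legal while <5 x i32> still
needed widening, the mask could not be retrieved via GetWidenedVector
and the assertion would fire; supporting it would mean padding or
splitting the mask instead.

  ; Hypothetical: the <5 x i32> result widens to <8 x i32>, but the
  ; <5 x i1> mask is already legal on this imagined target, so no
  ; <8 x i1> widened mask is readily available.
  %v = call <5 x i32> @llvm.vp.mul.v5i32(<5 x i32> %x, <5 x i32> %y,
                                         <5 x i1> %m, i32 %evl)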

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index ef8c3613e192..696b38e7879c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -156,6 +156,82 @@ define <4 x i8> @vadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <5 x i8> @llvm.vp.add.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32)
+
+define <5 x i8> @vadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v5i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  ret <5 x i8> %v
+}
+
+define <5 x i8> @vadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_v5i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <5 x i1> undef, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> undef, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+  ret <5 x i8> %v
+}
+
+define <5 x i8> @vadd_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vx_v5i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <5 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  ret <5 x i8> %v
+}
+
+define <5 x i8> @vadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vx_v5i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <5 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer
+  %head = insertelement <5 x i1> undef, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> undef, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  ret <5 x i8> %v
+}
+
+define <5 x i8> @vadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vi_v5i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <5 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  ret <5 x i8> %v
+}
+
+define <5 x i8> @vadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vi_v5i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vi v8, v8, -1
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <5 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer
+  %head = insertelement <5 x i1> undef, i1 true, i32 0
+  %m = shufflevector <5 x i1> %head, <5 x i1> undef, <5 x i32> zeroinitializer
+  %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+  ret <5 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.add.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
index 405089254961..6d6ff37df791 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
@@ -1228,6 +1228,114 @@ define <8 x i64> @vand_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
   ret <8 x i64> %v
 }
 
+declare <11 x i64> @llvm.vp.and.v11i64(<11 x i64>, <11 x i64>, <11 x i1>, i32)
+
+define <11 x i64> @vand_vv_v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vv_v11i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vand.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl)
+  ret <11 x i64> %v
+}
+
+define <11 x i64> @vand_vv_v11i64_unmasked(<11 x i64> %va, <11 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vv_v11i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vand.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <11 x i1> undef, i1 true, i32 0
+  %m = shufflevector <11 x i1> %head, <11 x i1> undef, <11 x i32> zeroinitializer
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl)
+  ret <11 x i64> %v
+}
+
+define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vand_vx_v11i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vmv1r.v v25, v0
+; RV32-NEXT:    addi a3, zero, 32
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; RV32-NEXT:    vmv.v.x v16, a1
+; RV32-NEXT:    lui a1, 341
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; RV32-NEXT:    vmv.s.x v0, a1
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; RV32-NEXT:    vmerge.vxm v16, v16, a0, v0
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vmv1r.v v0, v25
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vand_vx_v11i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    ret
+  %elt.head = insertelement <11 x i64> undef, i64 %b, i32 0
+  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
+  ret <11 x i64> %v
+}
+
+define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vand_vx_v11i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi a3, zero, 32
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; RV32-NEXT:    vmv.v.x v16, a1
+; RV32-NEXT:    lui a1, 341
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
+; RV32-NEXT:    vmv.s.x v0, a1
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
+; RV32-NEXT:    vmerge.vxm v16, v16, a0, v0
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vand_vx_v11i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %elt.head = insertelement <11 x i64> undef, i64 %b, i32 0
+  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer
+  %head = insertelement <11 x i1> undef, i1 true, i32 0
+  %m = shufflevector <11 x i1> %head, <11 x i1> undef, <11 x i32> zeroinitializer
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
+  ret <11 x i64> %v
+}
+
+define <11 x i64> @vand_vi_v11i64(<11 x i64> %va, <11 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vi_v11i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <11 x i64> undef, i64 4, i32 0
+  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
+  ret <11 x i64> %v
+}
+
+define <11 x i64> @vand_vi_v11i64_unmasked(<11 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vi_v11i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vand.vi v8, v8, 4
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <11 x i64> undef, i64 4, i32 0
+  %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer
+  %head = insertelement <11 x i1> undef, i1 true, i32 0
+  %m = shufflevector <11 x i1> %head, <11 x i1> undef, <11 x i32> zeroinitializer
+  %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl)
+  ret <11 x i64> %v
+}
+
 declare <16 x i64> @llvm.vp.and.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
 
 define <16 x i64> @vand_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
index 1a7e67abbc34..ae2f58c7df4d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
@@ -104,6 +104,18 @@ define <4 x i8> @vdiv_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32)
+
+define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdiv_vv_v6i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vdiv.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
+  ret <6 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vdiv_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
index 559d4c1e1dc1..da9b5ce09dcf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
@@ -104,6 +104,18 @@ define <4 x i8> @vdivu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32)
+
+define <6 x i8> @vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdivu_vv_v6i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
+  ret <6 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vdivu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
index e6ed948c0665..9302940d29ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
@@ -56,6 +56,18 @@ define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
   ret <2 x half> %v
 }
 
+declare <3 x half> @llvm.vp.fadd.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
+
+define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfadd_vv_v3f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <3 x half> @llvm.vp.fadd.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
+  ret <3 x half> %v
+}
+
 declare <4 x half> @llvm.vp.fadd.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
index 87aaff021451..aa4ab8dba3a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
@@ -56,6 +56,18 @@ define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
   ret <2 x half> %v
 }
 
+declare <3 x half> @llvm.vp.fdiv.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
+
+define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfdiv_vv_v3f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <3 x half> @llvm.vp.fdiv.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
+  ret <3 x half> %v
+}
+
 declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
index 259fbfb24728..885a16510dbb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
@@ -56,6 +56,18 @@ define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
   ret <2 x half> %v
 }
 
+declare <3 x half> @llvm.vp.fmul.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
+
+define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfmul_vv_v3f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <3 x half> @llvm.vp.fmul.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
+  ret <3 x half> %v
+}
+
 declare <4 x half> @llvm.vp.fmul.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
index ee04b73af882..e4158640a459 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
@@ -56,6 +56,18 @@ define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
   ret <2 x half> %v
 }
 
+declare <3 x half> @llvm.vp.fsub.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
+
+define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_v3f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <3 x half> @llvm.vp.fsub.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
+  ret <3 x half> %v
+}
+
 declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
index 26834149778d..a1a1005741d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
@@ -354,6 +354,56 @@ define <8 x i16> @vmul_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl
   ret <8 x i16> %v
 }
 
+declare <12 x i16> @llvm.vp.mul.v12i16(<12 x i16>, <12 x i16>, <12 x i1>, i32)
+
+define <12 x i16> @vmul_vv_v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v12i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vmul.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl)
+  ret <12 x i16> %v
+}
+
+define <12 x i16> @vmul_vv_v12i16_unmasked(<12 x i16> %va, <12 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_v12i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vmul.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <12 x i1> undef, i1 true, i32 0
+  %m = shufflevector <12 x i1> %head, <12 x i1> undef, <12 x i32> zeroinitializer
+  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl)
+  ret <12 x i16> %v
+}
+
+define <12 x i16> @vmul_vx_v12i16(<12 x i16> %va, i16 %b, <12 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vx_v12i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <12 x i16> undef, i16 %b, i32 0
+  %vb = shufflevector <12 x i16> %elt.head, <12 x i16> undef, <12 x i32> zeroinitializer
+  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %vb, <12 x i1> %m, i32 %evl)
+  ret <12 x i16> %v
+}
+
+define <12 x i16> @vmul_vx_v12i16_unmasked(<12 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vx_v12i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <12 x i16> undef, i16 %b, i32 0
+  %vb = shufflevector <12 x i16> %elt.head, <12 x i16> undef, <12 x i32> zeroinitializer
+  %head = insertelement <12 x i1> undef, i1 true, i32 0
+  %m = shufflevector <12 x i1> %head, <12 x i1> undef, <12 x i32> zeroinitializer
+  %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %vb, <12 x i1> %m, i32 %evl)
+  ret <12 x i16> %v
+}
+
 declare <16 x i16> @llvm.vp.mul.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
 
 define <16 x i16> @vmul_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
index 128dcf4e8546..eeb2b286fb24 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
@@ -156,6 +156,82 @@ define <4 x i8> @vor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <7 x i8> @llvm.vp.or.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vor_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_v7i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_v7i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <7 x i1> undef, i1 true, i32 0
+  %m = shufflevector <7 x i1> %head, <7 x i1> undef, <7 x i32> zeroinitializer
+  %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vx_v7i8(<7 x i8> %va, i8 %b, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_v7i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vor.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <7 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+  %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vx_v7i8_unmasked(<7 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_v7i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <7 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+  %head = insertelement <7 x i1> undef, i1 true, i32 0
+  %m = shufflevector <7 x i1> %head, <7 x i1> undef, <7 x i32> zeroinitializer
+  %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vi_v7i8(<7 x i8> %va, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_v7i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <7 x i8> undef, i8 5, i32 0
+  %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+  %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vi_v7i8_unmasked(<7 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_v7i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vor.vi v8, v8, 5
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <7 x i8> undef, i8 5, i32 0
+  %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+  %head = insertelement <7 x i1> undef, i1 true, i32 0
+  %m = shufflevector <7 x i1> %head, <7 x i1> undef, <7 x i32> zeroinitializer
+  %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.or.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
index fe6cd06fded3..3a637eb80eb6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
@@ -104,6 +104,18 @@ define <4 x i8> @vrem_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <6 x i8> @llvm.vp.srem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32)
+
+define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v6i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <6 x i8> @llvm.vp.srem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
+  ret <6 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.srem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vrem_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
index 3d0796011433..ead9220a7650 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -104,6 +104,18 @@ define <4 x i8> @vremu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <6 x i8> @llvm.vp.urem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32)
+
+define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v6i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <6 x i8> @llvm.vp.urem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
+  ret <6 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.urem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vremu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
index 8cfdb57b8cab..98a9c9938e98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
@@ -80,6 +80,18 @@ define <2 x i8> @vsll_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
   ret <2 x i8> %v
 }
 
+declare <3 x i8> @llvm.vp.shl.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32)
+
+define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsll_vv_v3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vsll.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.shl.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
+  ret <3 x i8> %v
+}
+
 declare <4 x i8> @llvm.vp.shl.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
 
 define <4 x i8> @vsll_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
index 370edf420f57..47f7ab7df398 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
@@ -156,6 +156,18 @@ define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsra_vv_v7i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsra.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
index 1c45115110fe..0638889c5a17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll
@@ -156,6 +156,18 @@ define <4 x i8> @vsrl_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
   ret <4 x i8> %v
 }
 
+declare <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsrl_vv_v7i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsrl.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
+  ret <7 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vsrl_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
index 077b1f03290c..88955eb4540e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
@@ -54,6 +54,56 @@ define <2 x i8> @vsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
   ret <2 x i8> %v
 }
 
+declare <3 x i8> @llvm.vp.sub.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32)
+
+define <3 x i8> @vsub_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
+  ret <3 x i8> %v
+}
+
+define <3 x i8> @vsub_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_v3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <3 x i1> undef, i1 true, i32 0
+  %m = shufflevector <3 x i1> %head, <3 x i1> undef, <3 x i32> zeroinitializer
+  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
+  ret <3 x i8> %v
+}
+
+define <3 x i8> @vsub_vx_v3i8(<3 x i8> %va, i8 %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vx_v3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <3 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <3 x i8> %elt.head, <3 x i8> undef, <3 x i32> zeroinitializer
+  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %vb, <3 x i1> %m, i32 %evl)
+  ret <3 x i8> %v
+}
+
+define <3 x i8> @vsub_vx_v3i8_unmasked(<3 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vx_v3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <3 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <3 x i8> %elt.head, <3 x i8> undef, <3 x i32> zeroinitializer
+  %head = insertelement <3 x i1> undef, i1 true, i32 0
+  %m = shufflevector <3 x i1> %head, <3 x i1> undef, <3 x i32> zeroinitializer
+  %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %vb, <3 x i1> %m, i32 %evl)
+  ret <3 x i8> %v
+}
+
 declare <4 x i8> @llvm.vp.sub.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
 
 define <4 x i8> @vsub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
index c9b4f966fba9..b5b016364ec1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
@@ -310,6 +310,108 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) {
   ret <8 x i8> %v
 }
 
+declare <9 x i8> @llvm.vp.xor.v9i8(<9 x i8>, <9 x i8>, <9 x i1>, i32)
+
+define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vv_v9i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vv_v9i8_unmasked(<9 x i8> %va, <9 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vv_v9i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <9 x i1> undef, i1 true, i32 0
+  %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vx_v9i8(<9 x i8> %va, i8 %b, <9 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vx_v9i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <9 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vx_v9i8_unmasked(<9 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vx_v9i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <9 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+  %head = insertelement <9 x i1> undef, i1 true, i32 0
+  %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vi_v9i8(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_v9i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <9 x i8> undef, i8 7, i32 0
+  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vi_v9i8_unmasked(<9 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_v9i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vi v8, v8, 7
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <9 x i8> undef, i8 7, i32 0
+  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+  %head = insertelement <9 x i1> undef, i1 true, i32 0
+  %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vi_v9i8_1(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_v9i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <9 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_v9i8_unmasked_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <9 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+  %head = insertelement <9 x i1> undef, i1 true, i32 0
+  %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer
+  %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+  ret <9 x i8> %v
+}
+
 declare <16 x i8> @llvm.vp.xor.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
 
 define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 10df4fc4884a..57e049e072d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -156,6 +156,82 @@ define <vscale x 2 x i8> @vadd_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
   ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vadd_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 3 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> undef, <vscale x 3 x i32> zeroinitializer
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vx_nxv3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 3 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vx_nxv3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 3 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+  %head = insertelement <vscale x 3 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> undef, <vscale x 3 x i32> zeroinitializer
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vi_nxv3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 3 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vi_nxv3i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vadd.vi v8, v8, -1
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 3 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+  %head = insertelement <vscale x 3 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> undef, <vscale x 3 x i32> zeroinitializer
+  %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vadd_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
index c37e1e9ceb93..b62ccc8f410a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
@@ -840,6 +840,82 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
   ret <vscale x 8 x i16> %v
 }
 
+declare <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16>, <vscale x 14 x i16>, <vscale x 14 x i1>, i32)
+
+define <vscale x 14 x i16> @vand_vv_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vv_nxv14i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
+  ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vv_nxv14i16_unmasked(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vv_nxv14i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vand.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 14 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> undef, <vscale x 14 x i32> zeroinitializer
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
+  ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vx_nxv14i16(<vscale x 14 x i16> %va, i16 %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vx_nxv14i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 14 x i16> undef, i16 %b, i32 0
+  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+  ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vx_nxv14i16_unmasked(<vscale x 14 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vx_nxv14i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 14 x i16> undef, i16 %b, i32 0
+  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+  %head = insertelement <vscale x 14 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> undef, <vscale x 14 x i32> zeroinitializer
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+  ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vi_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vi_nxv14i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vand.vi v8, v8, 4, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 14 x i16> undef, i16 4, i32 0
+  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+  ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vi_nxv14i16_unmasked(<vscale x 14 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vi_nxv14i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vand.vi v8, v8, 4
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 14 x i16> undef, i16 4, i32 0
+  %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+  %head = insertelement <vscale x 14 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> undef, <vscale x 14 x i32> zeroinitializer
+  %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+  ret <vscale x 14 x i16> %v
+}
+
 declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
 
 define <vscale x 16 x i16> @vand_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
index a9b0e66a7d96..c797bde9ee59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vdiv_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
   ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.sdiv.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vdiv_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdiv_vv_nxv3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vdiv.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 3 x i8> @llvm.vp.sdiv.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.sdiv.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vdiv_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
index eedf9fa447e4..0b2e4b67753e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vdivu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
   ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.udiv.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vdivu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdivu_vv_nxv3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 3 x i8> @llvm.vp.udiv.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vdivu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 2f33166cb392..c3c40aed3670 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfadd_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
   ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fadd.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfadd_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfadd_vv_nxv7f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 7 x double> @llvm.vp.fadd.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index c5f219c5e206..4146f215730c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfdiv_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
   ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fdiv.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfdiv_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfdiv_vv_nxv7f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfdiv.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 7 x double> @llvm.vp.fdiv.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index f33fd4a604fe..a474d5907d95 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfmul_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
   ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fmul.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfmul_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfmul_vv_nxv7f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfmul.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 7 x double> @llvm.vp.fmul.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index cfcfdf8af768..131ae96ad708 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
   ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fsub.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfsub_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv7f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vfsub.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 7 x double> @llvm.vp.fsub.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
index 8d42c4c8f93b..d222e4c30ad7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
@@ -804,6 +804,56 @@ define <vscale x 4 x i32> @vmul_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
   ret <vscale x 4 x i32> %v
 }
 
+declare <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32>, <vscale x 7 x i32>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x i32> @vmul_vv_nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv7i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmul.vv v8, v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i32> %v
+}
+
+define <vscale x 7 x i32> @vmul_vv_nxv7i32_unmasked(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv7i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmul.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 7 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> undef, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i32> %v
+}
+
+define <vscale x 7 x i32> @vmul_vx_nxv7i32(<vscale x 7 x i32> %va, i32 %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vx_nxv7i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 7 x i32> undef, i32 %b, i32 0
+  %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> undef, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i32> %v
+}
+
+define <vscale x 7 x i32> @vmul_vx_nxv7i32_unmasked(<vscale x 7 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vx_nxv7i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vmul.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 7 x i32> undef, i32 %b, i32 0
+  %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> undef, <vscale x 7 x i32> zeroinitializer
+  %head = insertelement <vscale x 7 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> undef, <vscale x 7 x i32> zeroinitializer
+  %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> %m, i32 %evl)
+  ret <vscale x 7 x i32> %v
+}
+
 declare <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
index ecccda50c5d1..586ee00afcaf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
@@ -1296,6 +1296,82 @@ define <vscale x 8 x i32> @vor_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
   ret <vscale x 8 x i32> %v
 }
 
+declare <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32>, <vscale x 10 x i32>, <vscale x 10 x i1>, i32)
+
+define <vscale x 10 x i32> @vor_vv_nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_nxv10i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 %evl)
+  ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vv_nxv10i32_unmasked(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_nxv10i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vor.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 10 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> undef, <vscale x 10 x i32> zeroinitializer
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 %evl)
+  ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vx_nxv10i32(<vscale x 10 x i32> %va, i32 %b, <vscale x 10 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_nxv10i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vor.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 10 x i32> undef, i32 %b, i32 0
+  %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+  ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vx_nxv10i32_unmasked(<vscale x 10 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_nxv10i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 10 x i32> undef, i32 %b, i32 0
+  %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+  %head = insertelement <vscale x 10 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> undef, <vscale x 10 x i32> zeroinitializer
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+  ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vi_nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_nxv10i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vor.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 10 x i32> undef, i32 5, i32 0
+  %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+  ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vi_nxv10i32_unmasked(<vscale x 10 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_nxv10i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vor.vi v8, v8, 5
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 10 x i32> undef, i32 5, i32 0
+  %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+  %head = insertelement <vscale x 10 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> undef, <vscale x 10 x i32> zeroinitializer
+  %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+  ret <vscale x 10 x i32> %v
+}
+
 declare <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
 
 define <vscale x 16 x i32> @vor_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
index 0bb7504e9436..89f2b7d2b686 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vrem_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
   ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.srem.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vrem_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 3 x i8> @llvm.vp.srem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
index f6e0cb71c241..e7a3b33f3ba1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vremu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
   ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.urem.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vremu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv3i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 3 x i8> @llvm.vp.urem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+  ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vremu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
index 1aca9277d9a0..061a2f44ead8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
@@ -232,6 +232,18 @@ define <vscale x 4 x i8> @vsll_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
   ret <vscale x 4 x i8> %v
 }
 
+declare <vscale x 5 x i8> @llvm.vp.shl.nxv5i8(<vscale x 5 x i8>, <vscale x 5 x i8>, <vscale x 5 x i1>, i32)
+
+define <vscale x 5 x i8> @vsll_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsll_vv_nxv5i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vsll.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 5 x i8> @llvm.vp.shl.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
+  ret <vscale x 5 x i8> %v
+}
+
 declare <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i8> @vsll_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
index 3ddd25937273..2ff5d07860f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
@@ -1636,6 +1636,18 @@ define <vscale x 4 x i64> @vsra_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i64> %v
 }
 
+declare <vscale x 6 x i64> @llvm.vp.ashr.nxv6i64(<vscale x 6 x i64>, <vscale x 6 x i64>, <vscale x 6 x i1>, i32)
+
+define <vscale x 6 x i64> @vsra_vv_nxv6i64(<vscale x 6 x i64> %va, <vscale x 6 x i64> %b, <vscale x 6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsra_vv_nxv6i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vsra.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 6 x i64> @llvm.vp.ashr.nxv6i64(<vscale x 6 x i64> %va, <vscale x 6 x i64> %b, <vscale x 6 x i1> %m, i32 %evl)
+  ret <vscale x 6 x i64> %v
+}
+
 declare <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i64> @vsra_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
index f42b5798e132..b58873b452b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
@@ -1636,6 +1636,18 @@ define <vscale x 4 x i64> @vsrl_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i64> %v
 }
 
+declare <vscale x 5 x i64> @llvm.vp.lshr.nxv5i64(<vscale x 5 x i64>, <vscale x 5 x i64>, <vscale x 5 x i1>, i32)
+
+define <vscale x 5 x i64> @vsrl_vv_nxv5i64(<vscale x 5 x i64> %va, <vscale x 5 x i64> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsrl_vv_nxv5i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vsrl.vv v8, v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 5 x i64> @llvm.vp.lshr.nxv5i64(<vscale x 5 x i64> %va, <vscale x 5 x i64> %b, <vscale x 5 x i1> %m, i32 %evl)
+  ret <vscale x 5 x i64> %v
+}
+
 declare <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i64> @vsrl_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
index a0b7b7de0227..94e697f97cd9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
@@ -154,6 +154,56 @@ define <vscale x 4 x i8> @vsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
   ret <vscale x 4 x i8> %v
 }
 
+declare <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8>, <vscale x 5 x i8>, <vscale x 5 x i1>, i32)
+
+define <vscale x 5 x i8> @vsub_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_nxv5i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
+  ret <vscale x 5 x i8> %v
+}
+
+define <vscale x 5 x i8> @vsub_vv_nxv5i8_unmasked(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vv_nxv5i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vsub.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 5 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 5 x i1> %head, <vscale x 5 x i1> undef, <vscale x 5 x i32> zeroinitializer
+  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
+  ret <vscale x 5 x i8> %v
+}
+
+define <vscale x 5 x i8> @vsub_vx_nxv5i8(<vscale x 5 x i8> %va, i8 %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vx_nxv5i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 5 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <vscale x 5 x i8> %elt.head, <vscale x 5 x i8> undef, <vscale x 5 x i32> zeroinitializer
+  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> %m, i32 %evl)
+  ret <vscale x 5 x i8> %v
+}
+
+define <vscale x 5 x i8> @vsub_vx_nxv5i8_unmasked(<vscale x 5 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsub_vx_nxv5i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 5 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <vscale x 5 x i8> %elt.head, <vscale x 5 x i8> undef, <vscale x 5 x i32> zeroinitializer
+  %head = insertelement <vscale x 5 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 5 x i1> %head, <vscale x 5 x i1> undef, <vscale x 5 x i32> zeroinitializer
+  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> %m, i32 %evl)
+  ret <vscale x 5 x i8> %v
+}
+
 declare <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i8> @vsub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
index a9756058b8c0..cfd5bd116c6c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
@@ -412,6 +412,108 @@ define <vscale x 8 x i8> @vxor_vi_nxv8i8_unmasked_1(<vscale x 8 x i8> %va, i32 z
   ret <vscale x 8 x i8> %v
 }
 
+declare <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8>, <vscale x 15 x i8>, <vscale x 15 x i1>, i32)
+
+define <vscale x 15 x i8> @vxor_vv_nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vv_nxv15i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vxor.vv v8, v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
+define <vscale x 15 x i8> @vxor_vv_nxv15i8_unmasked(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vv_nxv15i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vxor.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 15 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> undef, <vscale x 15 x i32> zeroinitializer
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
+define <vscale x 15 x i8> @vxor_vx_nxv15i8(<vscale x 15 x i8> %va, i8 %b, <vscale x 15 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vx_nxv15i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vxor.vx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 15 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> undef, <vscale x 15 x i32> zeroinitializer
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
+define <vscale x 15 x i8> @vxor_vx_nxv15i8_unmasked(<vscale x 15 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vx_nxv15i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vxor.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 15 x i8> undef, i8 %b, i32 0
+  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> undef, <vscale x 15 x i32> zeroinitializer
+  %head = insertelement <vscale x 15 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> undef, <vscale x 15 x i32> zeroinitializer
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
+define <vscale x 15 x i8> @vxor_vi_nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_nxv15i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vxor.vi v8, v8, 7, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 15 x i8> undef, i8 7, i32 0
+  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> undef, <vscale x 15 x i32> zeroinitializer
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
+define <vscale x 15 x i8> @vxor_vi_nxv15i8_unmasked(<vscale x 15 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_nxv15i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vxor.vi v8, v8, 7
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 15 x i8> undef, i8 7, i32 0
+  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> undef, <vscale x 15 x i32> zeroinitializer
+  %head = insertelement <vscale x 15 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> undef, <vscale x 15 x i32> zeroinitializer
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
+define <vscale x 15 x i8> @vxor_vi_nxv15i8_1(<vscale x 15 x i8> %va, <vscale x 15 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_nxv15i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vnot.v v8, v8, v0.t
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 15 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> undef, <vscale x 15 x i32> zeroinitializer
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
+define <vscale x 15 x i8> @vxor_vi_nxv15i8_unmasked_1(<vscale x 15 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_nxv15i8_unmasked_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vxor.vi v8, v8, -1
+; CHECK-NEXT:    ret
+  %elt.head = insertelement <vscale x 15 x i8> undef, i8 -1, i32 0
+  %vb = shufflevector <vscale x 15 x i8> %elt.head, <vscale x 15 x i8> undef, <vscale x 15 x i32> zeroinitializer
+  %head = insertelement <vscale x 15 x i1> undef, i1 true, i32 0
+  %m = shufflevector <vscale x 15 x i1> %head, <vscale x 15 x i1> undef, <vscale x 15 x i32> zeroinitializer
+  %v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %vb, <vscale x 15 x i1> %m, i32 %evl)
+  ret <vscale x 15 x i8> %v
+}
+
 declare <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
 
define <vscale x 16 x i8> @vxor_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {