[llvm] e6d3261 - [ARM][MVE] Refactor complex vector intrinsics [NFCI]

Mikhail Maltsev via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 10 08:22:55 PST 2019


Author: Mikhail Maltsev
Date: 2019-12-10T16:21:52Z
New Revision: e6d3261c67ecade5d959ee3094eb2bd1cd7ec447

URL: https://github.com/llvm/llvm-project/commit/e6d3261c67ecade5d959ee3094eb2bd1cd7ec447
DIFF: https://github.com/llvm/llvm-project/commit/e6d3261c67ecade5d959ee3094eb2bd1cd7ec447.diff

LOG: [ARM][MVE] Refactor complex vector intrinsics [NFCI]

Summary:
This patch refactors instruction selection for the complex vector
addition, multiplication and multiply-add intrinsics so that it is
now based on TableGen patterns rather than C++ code.
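
For reference, a minimal sketch of what such a pattern looks like, using
the unpredicated f32 vcmulq case (illustrative only; the actual patch
wraps the patterns in multiclasses parameterized by MVEVectorVTInfo, as
in the ARMInstrMVE.td hunk below):

  // Match the IR intrinsic directly to the machine instruction,
  // replacing the hand-written C++ selection code. The rotation
  // immediate comes first in the intrinsic's operand list and is
  // forwarded as the instruction's $rot operand.
  def : Pat<(v4f32 (int_arm_mve_vcmulq imm:$rot,
                        (v4f32 MQPR:$Qn), (v4f32 MQPR:$Qm))),
            (v4f32 (MVE_VCMULf32 (v4f32 MQPR:$Qn), (v4f32 MQPR:$Qm),
                                 imm:$rot))>;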

It also changes the first parameter (halving vs non-halving) of the
arm_mve_vcaddq IR intrinsic to match the corresponding instruction
encoding; this requires some changes in the tests.
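
Concretely, the intrinsic's first argument now carries the value of the
encoding bit, so the two clang-side instantiations swap their flag
values (copied from the arm_mve.td hunk below):

  let params = T.Usual in
  defm vcaddq : VectorComplexAddAngle<(u32 1)>;   // non-halving VCADD

  let params = T.Signed in
  defm vhcaddq : VectorComplexAddAngle<(u32 0)>;  // halving VHCADD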

The patch addresses David's comment in https://reviews.llvm.org/D71190

Reviewers: dmgreen, ostannard, simon_tatham, MarkMurrayARM

Reviewed By: dmgreen

Subscribers: merge_guards_bot, kristof.beyls, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D71245

Added: 
    

Modified: 
    clang/include/clang/Basic/arm_mve.td
    clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
    clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c
    llvm/include/llvm/IR/IntrinsicsARM.td
    llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
    llvm/lib/Target/ARM/ARMInstrMVE.td
    llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index f3d3f4124101..618a087d6275 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -683,16 +683,16 @@ def vadciq_m: Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
          (xval $pair, 0))>;
 }
 
-multiclass VectorComplexAddPred<dag halving, dag angle> {
+multiclass VectorComplexAddPred<dag not_halving, dag angle> {
   def "" : Intrinsic<Vector, (args Vector:$a, Vector:$b),
-     (IRInt<"vcaddq", [Vector]> halving, angle, $a, $b)>;
+     (IRInt<"vcaddq", [Vector]> not_halving, angle, $a, $b)>;
   def _m : Intrinsic<Vector, (args Vector:$inactive, Vector:$a, Vector:$b,
                               Predicate:$pred),
      (IRInt<"vcaddq_predicated", [Vector, Predicate]>
-       halving, angle, $inactive, $a, $b, $pred)>;
+       not_halving, angle, $inactive, $a, $b, $pred)>;
   def _x : Intrinsic<Vector, (args Vector:$a, Vector:$b, Predicate:$pred),
      (IRInt<"vcaddq_predicated", [Vector, Predicate]>
-       halving, angle, (undef Vector), $a, $b, $pred)>;
+       not_halving, angle, (undef Vector), $a, $b, $pred)>;
 }
 
 multiclass VectorComplexMulPred<dag angle> {
@@ -715,9 +715,9 @@ multiclass VectorComplexMLAPred<dag angle> {
     (IRInt<"vcmlaq_predicated", [Vector, Predicate]> angle, $a, $b, $c, $pred)>;
 }
 
-multiclass VectorComplexAddAngle<dag halving> {
-  defm _rot90 : VectorComplexAddPred<halving, (u32 0)>;
-  defm _rot270 : VectorComplexAddPred<halving, (u32 1)>;
+multiclass VectorComplexAddAngle<dag not_halving> {
+  defm _rot90 : VectorComplexAddPred<not_halving, (u32 0)>;
+  defm _rot270 : VectorComplexAddPred<not_halving, (u32 1)>;
 }
 
 multiclass VectorComplexMulAngle {
@@ -735,10 +735,10 @@ multiclass VectorComplexMLAAngle {
 }
 
 let params = T.Usual in
-defm vcaddq : VectorComplexAddAngle<(u32 0)>;
+defm vcaddq : VectorComplexAddAngle<(u32 1)>;
 
 let params = T.Signed in
-defm vhcaddq : VectorComplexAddAngle<(u32 1)>;
+defm vhcaddq : VectorComplexAddAngle<(u32 0)>;
 
 let params = T.Float in {
 defm vcmulq : VectorComplexMulAngle;

diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c b/clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
index 3e00384eeb61..f23751a3526a 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vcaddq.c
@@ -6,7 +6,7 @@
 
 // CHECK-LABEL: @test_vcaddq_rot90_u8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 uint8x16_t test_vcaddq_rot90_u8(uint8x16_t a, uint8x16_t b)
@@ -20,7 +20,7 @@ uint8x16_t test_vcaddq_rot90_u8(uint8x16_t a, uint8x16_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot90_u16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 uint16x8_t test_vcaddq_rot90_u16(uint16x8_t a, uint16x8_t b)
@@ -34,7 +34,7 @@ uint16x8_t test_vcaddq_rot90_u16(uint16x8_t a, uint16x8_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot90_u32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 uint32x4_t test_vcaddq_rot90_u32(uint32x4_t a, uint32x4_t b)
@@ -48,7 +48,7 @@ uint32x4_t test_vcaddq_rot90_u32(uint32x4_t a, uint32x4_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot90_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_vcaddq_rot90_s8(int8x16_t a, int8x16_t b)
@@ -62,7 +62,7 @@ int8x16_t test_vcaddq_rot90_s8(int8x16_t a, int8x16_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot90_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 int16x8_t test_vcaddq_rot90_s16(int16x8_t a, int16x8_t b)
@@ -76,7 +76,7 @@ int16x8_t test_vcaddq_rot90_s16(int16x8_t a, int16x8_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot90_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 int32x4_t test_vcaddq_rot90_s32(int32x4_t a, int32x4_t b)
@@ -90,7 +90,7 @@ int32x4_t test_vcaddq_rot90_s32(int32x4_t a, int32x4_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot90_f16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x half> [[TMP0]]
 //
 float16x8_t test_vcaddq_rot90_f16(float16x8_t a, float16x8_t b)
@@ -104,7 +104,7 @@ float16x8_t test_vcaddq_rot90_f16(float16x8_t a, float16x8_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot90_f32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x float> [[TMP0]]
 //
 float32x4_t test_vcaddq_rot90_f32(float32x4_t a, float32x4_t b)
@@ -118,7 +118,7 @@ float32x4_t test_vcaddq_rot90_f32(float32x4_t a, float32x4_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_u8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 uint8x16_t test_vcaddq_rot270_u8(uint8x16_t a, uint8x16_t b)
@@ -132,7 +132,7 @@ uint8x16_t test_vcaddq_rot270_u8(uint8x16_t a, uint8x16_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_u16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 uint16x8_t test_vcaddq_rot270_u16(uint16x8_t a, uint16x8_t b)
@@ -146,7 +146,7 @@ uint16x8_t test_vcaddq_rot270_u16(uint16x8_t a, uint16x8_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_u32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 uint32x4_t test_vcaddq_rot270_u32(uint32x4_t a, uint32x4_t b)
@@ -160,7 +160,7 @@ uint32x4_t test_vcaddq_rot270_u32(uint32x4_t a, uint32x4_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_vcaddq_rot270_s8(int8x16_t a, int8x16_t b)
@@ -174,7 +174,7 @@ int8x16_t test_vcaddq_rot270_s8(int8x16_t a, int8x16_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 int16x8_t test_vcaddq_rot270_s16(int16x8_t a, int16x8_t b)
@@ -188,7 +188,7 @@ int16x8_t test_vcaddq_rot270_s16(int16x8_t a, int16x8_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 int32x4_t test_vcaddq_rot270_s32(int32x4_t a, int32x4_t b)
@@ -202,7 +202,7 @@ int32x4_t test_vcaddq_rot270_s32(int32x4_t a, int32x4_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_f16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x half> [[TMP0]]
 //
 float16x8_t test_vcaddq_rot270_f16(float16x8_t a, float16x8_t b)
@@ -216,7 +216,7 @@ float16x8_t test_vcaddq_rot270_f16(float16x8_t a, float16x8_t b)
 
 // CHECK-LABEL: @test_vcaddq_rot270_f32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x float> [[TMP0]]
 //
 float32x4_t test_vcaddq_rot270_f32(float32x4_t a, float32x4_t b)
@@ -233,7 +233,7 @@ float32x4_t test_vcaddq_rot270_f32(float32x4_t a, float32x4_t b)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 uint8x16_t test_vcaddq_rot90_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
@@ -249,7 +249,7 @@ uint8x16_t test_vcaddq_rot90_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 uint16x8_t test_vcaddq_rot90_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
@@ -265,7 +265,7 @@ uint16x8_t test_vcaddq_rot90_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 uint32x4_t test_vcaddq_rot90_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
@@ -281,7 +281,7 @@ uint32x4_t test_vcaddq_rot90_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -297,7 +297,7 @@ int8x16_t test_vcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, m
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -313,7 +313,7 @@ int16x8_t test_vcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
@@ -329,7 +329,7 @@ int32x4_t test_vcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
 //
 float16x8_t test_vcaddq_rot90_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
@@ -345,7 +345,7 @@ float16x8_t test_vcaddq_rot90_m_f16(float16x8_t inactive, float16x8_t a, float16
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
 //
 float32x4_t test_vcaddq_rot90_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
@@ -361,7 +361,7 @@ float32x4_t test_vcaddq_rot90_m_f32(float32x4_t inactive, float32x4_t a, float32
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 uint8x16_t test_vcaddq_rot270_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t b, mve_pred16_t p)
@@ -377,7 +377,7 @@ uint8x16_t test_vcaddq_rot270_m_u8(uint8x16_t inactive, uint8x16_t a, uint8x16_t
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 uint16x8_t test_vcaddq_rot270_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
@@ -393,7 +393,7 @@ uint16x8_t test_vcaddq_rot270_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 uint32x4_t test_vcaddq_rot270_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_t b, mve_pred16_t p)
@@ -409,7 +409,7 @@ uint32x4_t test_vcaddq_rot270_m_u32(uint32x4_t inactive, uint32x4_t a, uint32x4_
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -425,7 +425,7 @@ int8x16_t test_vcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -441,7 +441,7 @@ int16x8_t test_vcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vcaddq_rot270_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
@@ -457,7 +457,7 @@ int32x4_t test_vcaddq_rot270_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
 //
 float16x8_t test_vcaddq_rot270_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
@@ -473,7 +473,7 @@ float16x8_t test_vcaddq_rot270_m_f16(float16x8_t inactive, float16x8_t a, float1
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
 //
 float32x4_t test_vcaddq_rot270_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
@@ -489,7 +489,7 @@ float32x4_t test_vcaddq_rot270_m_f32(float32x4_t inactive, float32x4_t a, float3
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 uint8x16_t test_vcaddq_rot90_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
@@ -505,7 +505,7 @@ uint8x16_t test_vcaddq_rot90_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 uint16x8_t test_vcaddq_rot90_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
@@ -521,7 +521,7 @@ uint16x8_t test_vcaddq_rot90_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 uint32x4_t test_vcaddq_rot90_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
@@ -537,7 +537,7 @@ uint32x4_t test_vcaddq_rot90_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -553,7 +553,7 @@ int8x16_t test_vcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -569,7 +569,7 @@ int16x8_t test_vcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
@@ -585,7 +585,7 @@ int32x4_t test_vcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
 //
 float16x8_t test_vcaddq_rot90_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
@@ -601,7 +601,7 @@ float16x8_t test_vcaddq_rot90_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
 //
 float32x4_t test_vcaddq_rot90_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
@@ -617,7 +617,7 @@ float32x4_t test_vcaddq_rot90_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 uint8x16_t test_vcaddq_rot270_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
@@ -633,7 +633,7 @@ uint8x16_t test_vcaddq_rot270_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 uint16x8_t test_vcaddq_rot270_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
@@ -649,7 +649,7 @@ uint16x8_t test_vcaddq_rot270_x_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 uint32x4_t test_vcaddq_rot270_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
@@ -665,7 +665,7 @@ uint32x4_t test_vcaddq_rot270_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -681,7 +681,7 @@ int8x16_t test_vcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -697,7 +697,7 @@ int16x8_t test_vcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
@@ -713,7 +713,7 @@ int32x4_t test_vcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x half> [[TMP2]]
 //
 float16x8_t test_vcaddq_rot270_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
@@ -729,7 +729,7 @@ float16x8_t test_vcaddq_rot270_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x float> [[TMP2]]
 //
 float32x4_t test_vcaddq_rot270_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)

diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c b/clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c
index 7da8db5f1647..62335bf9af2e 100644
--- a/clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vhcaddq.c
@@ -6,7 +6,7 @@
 
 // CHECK-LABEL: @test_vhcaddq_rot90_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_vhcaddq_rot90_s8(int8x16_t a, int8x16_t b)
@@ -20,7 +20,7 @@ int8x16_t test_vhcaddq_rot90_s8(int8x16_t a, int8x16_t b)
 
 // CHECK-LABEL: @test_vhcaddq_rot90_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 int16x8_t test_vhcaddq_rot90_s16(int16x8_t a, int16x8_t b)
@@ -34,7 +34,7 @@ int16x8_t test_vhcaddq_rot90_s16(int16x8_t a, int16x8_t b)
 
 // CHECK-LABEL: @test_vhcaddq_rot90_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 int32x4_t test_vhcaddq_rot90_s32(int32x4_t a, int32x4_t b)
@@ -48,7 +48,7 @@ int32x4_t test_vhcaddq_rot90_s32(int32x4_t a, int32x4_t b)
 
 // CHECK-LABEL: @test_vhcaddq_rot270_s8(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
 //
 int8x16_t test_vhcaddq_rot270_s8(int8x16_t a, int8x16_t b)
@@ -62,7 +62,7 @@ int8x16_t test_vhcaddq_rot270_s8(int8x16_t a, int8x16_t b)
 
 // CHECK-LABEL: @test_vhcaddq_rot270_s16(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
 //
 int16x8_t test_vhcaddq_rot270_s16(int16x8_t a, int16x8_t b)
@@ -76,7 +76,7 @@ int16x8_t test_vhcaddq_rot270_s16(int16x8_t a, int16x8_t b)
 
 // CHECK-LABEL: @test_vhcaddq_rot270_s32(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
 //
 int32x4_t test_vhcaddq_rot270_s32(int32x4_t a, int32x4_t b)
@@ -92,7 +92,7 @@ int32x4_t test_vhcaddq_rot270_s32(int32x4_t a, int32x4_t b)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vhcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -108,7 +108,7 @@ int8x16_t test_vhcaddq_rot90_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vhcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -124,7 +124,7 @@ int16x8_t test_vhcaddq_rot90_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vhcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
@@ -140,7 +140,7 @@ int32x4_t test_vhcaddq_rot90_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vhcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -156,7 +156,7 @@ int8x16_t test_vhcaddq_rot270_x_s8(int8x16_t a, int8x16_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vhcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -172,7 +172,7 @@ int16x8_t test_vhcaddq_rot270_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vhcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
@@ -188,7 +188,7 @@ int32x4_t test_vhcaddq_rot270_x_s32(int32x4_t a, int32x4_t b, mve_pred16_t p)
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vhcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -204,7 +204,7 @@ int8x16_t test_vhcaddq_rot90_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vhcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -220,7 +220,7 @@ int16x8_t test_vhcaddq_rot90_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vhcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
@@ -236,7 +236,7 @@ int32x4_t test_vhcaddq_rot90_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> [[INACTIVE:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <16 x i8> [[TMP2]]
 //
 int8x16_t test_vhcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
@@ -252,7 +252,7 @@ int8x16_t test_vhcaddq_rot270_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b,
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <8 x i16> [[TMP2]]
 //
 int16x8_t test_vhcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b, mve_pred16_t p)
@@ -268,7 +268,7 @@ int16x8_t test_vhcaddq_rot270_m_s16(int16x8_t inactive, int16x8_t a, int16x8_t b
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
 // CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
-// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
 // CHECK-NEXT:    ret <4 x i32> [[TMP2]]
 //
 int32x4_t test_vhcaddq_rot270_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)

diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
index 6e63022d4cf5..578b22a3abb7 100644
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -984,7 +984,9 @@ multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
 }
 
 // The first two parameters are compile-time constants:
-// * Halving: is the a halving (vhcaddq) or non-halving (vcaddq) instruction
+// * Halving: 0 means halving (vhcaddq), 1 means non-halving (vcaddq)
+//            instruction. Note: the flag is inverted to match the corresponding
+//            bit in the instruction encoding
 // * Rotation angle: 0 means 90 deg, 1 means 270 deg
 defm int_arm_mve_vcaddq : MVEMXPredicated<
   [llvm_anyvector_ty],

diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 93e1a8fd2a7f..887fd947bf3d 100644
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -233,22 +233,6 @@ class ARMDAGToDAGISel : public SelectionDAGISel {
   void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
                          uint16_t OpcodeWithNoCarry, bool Add, bool Predicated);
 
-  /// Select MVE complex vector addition intrinsic
-  /// OpcodesInt are opcodes for non-halving addition of complex integer vectors
-  /// OpcodesHInt are opcodes for halving addition of complex integer vectors
-  /// OpcodesFP are opcodes for addition of complex floating point vectors
-  void SelectMVE_VCADD(SDNode *N, const uint16_t *OpcodesInt,
-                       const uint16_t *OpcodesHInt, const uint16_t *OpcodesFP,
-                       bool Predicated);
-
-  /// Select MVE complex vector multiplication intrinsic
-  void SelectMVE_VCMUL(SDNode *N, uint16_t OpcodeF16, uint16_t OpcodeF32,
-                       bool Predicated);
-
-  /// Sekect NVE complex vector multiply-add intrinsic
-  void SelectMVE_VCMLA(SDNode *N, uint16_t OpcodeF16, uint16_t OpcodeF32,
-                       bool Predicated);
-
   /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
   /// should be 2 or 4. The opcode array specifies the instructions
   /// used for 8, 16 and 32-bit lane sizes respectively, and each
@@ -2533,138 +2517,6 @@ void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry,
   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
 }
 
-/// Convert an SDValue to a boolean value. SDVal must be a compile-time constant
-static bool SDValueToConstBool(SDValue SDVal) {
-  ConstantSDNode *SDValConstant = dyn_cast<ConstantSDNode>(SDVal);
-  assert(SDValConstant && "expected a compile-time constant");
-  uint64_t Value = SDValConstant->getZExtValue();
-  assert((Value == 0 || Value == 1) && "expected value 0 or 1");
-  return Value;
-}
-
-/// Select an opcode based on a floating point vector type. One opcode
-/// corresponds to 16-bit floating point element type, the other to two 32-bit
-/// element type.
-/// Other types are not allowed
-static uint16_t SelectFPOpcode(EVT VT, uint16_t OpcodeF16, uint16_t OpcodeF32) {
-  assert(VT.isFloatingPoint() && VT.isVector() &&
-         "expected a floating-point vector");
-  switch (VT.getVectorElementType().getSizeInBits()) {
-  case 16:
-    return OpcodeF16;
-  case 32:
-    return OpcodeF32;
-  default:
-    llvm_unreachable("bad vector element size");
-  }
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCADD(SDNode *N, const uint16_t *OpcodesInt,
-                                      const uint16_t *OpcodesHInt,
-                                      const uint16_t *OpcodesFP,
-                                      bool Predicated) {
-  EVT VT = N->getValueType(0);
-  SDLoc Loc(N);
-
-  bool IsHalved = SDValueToConstBool(N->getOperand(1));
-  bool IsAngle270 = SDValueToConstBool(N->getOperand(2));
-  bool IsFP = VT.isFloatingPoint();
-  if (IsHalved)
-    assert(!IsFP && "vhcaddq requires integer vector type");
-
-  uint16_t Opcode;
-  if (IsFP) {
-    Opcode = SelectFPOpcode(VT, OpcodesFP[0], OpcodesFP[1]);
-  } else {
-    const uint16_t *Opcodes = IsHalved ? OpcodesHInt : OpcodesInt;
-    switch (VT.getVectorElementType().getSizeInBits()) {
-    case 8:
-      Opcode = Opcodes[0];
-      break;
-    case 16:
-      Opcode = Opcodes[1];
-      break;
-    case 32:
-      Opcode = Opcodes[2];
-      break;
-    default:
-      llvm_unreachable("bad vector element size");
-   }
-  }
-
-  int FirstInputOp = Predicated ? 4 : 3;
-  SmallVector<SDValue, 8> Ops;
-  // Vectors
-  Ops.push_back(N->getOperand(FirstInputOp));
-  Ops.push_back(N->getOperand(FirstInputOp + 1));
-  // Rotation
-  Ops.push_back(CurDAG->getTargetConstant(IsAngle270, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc,
-                         N->getOperand(FirstInputOp + 2),  // predicate
-                         N->getOperand(FirstInputOp - 1)); // inactive
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc, VT);
-
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
-static uint32_t GetCMulRotation(SDValue V) {
-  const ConstantSDNode *RotConstant = dyn_cast<ConstantSDNode>(V);
-  assert(RotConstant && "expected a compile-time constant");
-  uint64_t RotValue = RotConstant->getZExtValue();
-  assert(RotValue < 4 && "expected value in range [0, 3]");
-  return RotValue;
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCMUL(SDNode *N, uint16_t OpcodeF16,
-                                      uint16_t OpcodeF32, bool Predicated) {
-  EVT VT = N->getValueType(0);
-  SDLoc Loc(N);
-
-  int FirstInputOp = Predicated ? 3 : 2;
-  SmallVector<SDValue, 8> Ops;
-  // Vectors
-  Ops.push_back(N->getOperand(FirstInputOp));
-  Ops.push_back(N->getOperand(FirstInputOp + 1));
-  // Rotation
-  uint32_t RotValue = GetCMulRotation(N->getOperand(1));
-  Ops.push_back(CurDAG->getTargetConstant(RotValue, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc,
-                         N->getOperand(FirstInputOp + 2),  // predicate
-                         N->getOperand(FirstInputOp - 1)); // inactive
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc, VT);
-
-  uint16_t Opcode = SelectFPOpcode(VT, OpcodeF16, OpcodeF32);
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
-void ARMDAGToDAGISel::SelectMVE_VCMLA(SDNode *N, uint16_t OpcodeF16,
-                                      uint16_t OpcodeF32, bool Predicated) {
-  SDLoc Loc(N);
-
-  SmallVector<SDValue, 8> Ops;
-  // The 3 vector operands
-  for (int i = 2; i < 5; ++i)
-    Ops.push_back(N->getOperand(i));
-  // Rotation
-  uint32_t RotValue = GetCMulRotation(N->getOperand(1));
-  Ops.push_back(CurDAG->getTargetConstant(RotValue, Loc, MVT::i32));
-
-  if (Predicated)
-    AddMVEPredicateToOps(Ops, Loc, N->getOperand(5));
-  else
-    AddEmptyMVEPredicateToOps(Ops, Loc);
-
-  EVT VT = N->getValueType(0);
-  uint16_t Opcode = SelectFPOpcode(VT, OpcodeF16, OpcodeF32);
-  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops));
-}
-
 void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs,
                                     const uint16_t *const *Opcodes) {
   EVT VT = N->getValueType(0);
@@ -4510,35 +4362,6 @@ void ARMDAGToDAGISel::Select(SDNode *N) {
                         IntNo == Intrinsic::arm_mve_vadc_predicated);
       return;
 
-    case Intrinsic::arm_mve_vcaddq:
-    case Intrinsic::arm_mve_vcaddq_predicated: {
-      static const uint16_t OpcodesInt[] = {
-        ARM::MVE_VCADDi8, ARM::MVE_VCADDi16, ARM::MVE_VCADDi32,
-      };
-      static const uint16_t OpcodesHInt[] = {
-        ARM::MVE_VHCADDs8, ARM::MVE_VHCADDs16, ARM::MVE_VHCADDs32,
-      };
-      static const uint16_t OpcodesFP[] = {
-        ARM::MVE_VCADDf16, ARM::MVE_VCADDf32,
-      };
-
-      SelectMVE_VCADD(N, OpcodesInt, OpcodesHInt,
-                      OpcodesFP, IntNo == Intrinsic::arm_mve_vcaddq_predicated);
-      return;
-    }
-
-    case Intrinsic::arm_mve_vcmulq:
-    case Intrinsic::arm_mve_vcmulq_predicated:
-      SelectMVE_VCMUL(N, ARM::MVE_VCMULf16, ARM::MVE_VCMULf32,
-                      IntNo == Intrinsic::arm_mve_vcmulq_predicated);
-      return;
-
-    case Intrinsic::arm_mve_vcmlaq:
-    case Intrinsic::arm_mve_vcmlaq_predicated:
-      SelectMVE_VCMLA(N, ARM::MVE_VCMLAf16, ARM::MVE_VCMLAf32,
-                      IntNo == Intrinsic::arm_mve_vcmlaq_predicated);
-      return;
-
     }
     break;
   }

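With the hand-written selection code removed above, the new flag semantics of the
IR intrinsic are easiest to read off the updated tests at the end of this patch.
A minimal IR sketch follows (the declare line and function are illustrative, not
part of the patch): the first i32 operand of llvm.arm.mve.vcaddq now mirrors the
instruction encoding, so 1 selects the plain VCADD and 0 the halving VHCADD,
while the second i32 operand still selects the rotation (0 = #90, 1 = #270).

declare <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32, i32, <8 x i16>, <8 x i16>)

define arm_aapcs_vfpcc <8 x i16> @vcadd_flag_sketch(<8 x i16> %a, <8 x i16> %b) {
entry:
  ; (i32 1, i32 0): non-halving, rotate 90 -> vcadd.i16 ..., #90
  %plain = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
  ; (i32 0, i32 1): halving, rotate 270 -> vhcadd.s16 ..., #270
  %halved = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
  %sum = add <8 x i16> %plain, %halved
  ret <8 x i16> %sum
}
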
diff  --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index d351ae8905b6..7bc067c25efc 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -2960,10 +2960,10 @@ multiclass MVE_VMUL_fp_m<MVEVectorVTInfo VTI>
 defm MVE_VMULf32 : MVE_VMUL_fp_m<MVE_v4f32>;
 defm MVE_VMULf16 : MVE_VMUL_fp_m<MVE_v8f16>;
 
-class MVE_VCMLA<string suffix, bit size, list<dag> pattern=[]>
+class MVE_VCMLA<string suffix, bit size>
   : MVEFloatArithNeon<"vcmla", suffix, size, (outs MQPR:$Qd),
                          (ins MQPR:$Qd_src, MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
-                         "$Qd, $Qn, $Qm, $rot", vpred_n, "$Qd = $Qd_src", pattern> {
+                         "$Qd, $Qn, $Qm, $rot", vpred_n, "$Qd = $Qd_src", []> {
   bits<4> Qd;
   bits<4> Qn;
   bits<2> rot;
@@ -2980,8 +2980,32 @@ class MVE_VCMLA<string suffix, bit size, list<dag> pattern=[]>
   let Inst{4} = 0b0;
 }
 
-def MVE_VCMLAf16 : MVE_VCMLA<"f16", 0b0>;
-def MVE_VCMLAf32 : MVE_VCMLA<"f32", 0b1>;
+multiclass MVE_VCMLA_m<MVEVectorVTInfo VTI, bit size> {
+  def "" : MVE_VCMLA<VTI.Suffix, size>;
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq
+                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcmlaq_predicated
+                            imm:$rot, (VTI.Vec MQPR:$Qd_src),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qd_src), (VTI.Vec MQPR:$Qn),
+                            (VTI.Vec MQPR:$Qm), imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask)))>;
+  }
+}
+
+defm MVE_VCMLAf16 : MVE_VCMLA_m<MVE_v8f16, 0b0>;
+defm MVE_VCMLAf32 : MVE_VCMLA_m<MVE_v4f32, 0b1>;
 
 class MVE_VADDSUBFMA_fp<string iname, string suffix, bit size, bit bit_4,
                         bit bit_8, bit bit_21, dag iops=(ins),
@@ -3056,10 +3080,10 @@ defm MVE_VADDf16 : MVE_VADD_fp_m<MVE_v8f16>;
 defm MVE_VSUBf32 : MVE_VSUB_fp_m<MVE_v4f32>;
 defm MVE_VSUBf16 : MVE_VSUB_fp_m<MVE_v8f16>;
 
-class MVE_VCADD<string suffix, bit size, string cstr="", list<dag> pattern=[]>
+class MVE_VCADD<string suffix, bit size, string cstr="">
   : MVEFloatArithNeon<"vcadd", suffix, size, (outs MQPR:$Qd),
                          (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
-                         "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                         "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qd;
   bits<4> Qn;
   bit rot;
@@ -3077,8 +3101,31 @@ class MVE_VCADD<string suffix, bit size, string cstr="", list<dag> pattern=[]>
   let Inst{4} = 0b0;
 }
 
-def MVE_VCADDf16 : MVE_VCADD<"f16", 0b0>;
-def MVE_VCADDf32 : MVE_VCADD<"f32", 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VCADD_m<MVEVectorVTInfo VTI, bit size, string cstr=""> {
+  def "" : MVE_VCADD<VTI.Suffix, size, cstr>;
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq (i32 1),
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated (i32 1),
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+  }
+}
+
+defm MVE_VCADDf16 : MVE_VCADD_m<MVE_v8f16, 0b0>;
+defm MVE_VCADDf32 : MVE_VCADD_m<MVE_v4f32, 0b1, "@earlyclobber $Qd">;
 
 class MVE_VABD_fp<string suffix, bit size>
   : MVE_float<"vabd", suffix, (outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm),
@@ -3690,10 +3737,10 @@ defm MVE_VQDMLSDHX  : MVE_VQxDMLxDH_multi<"vqdmlsdhx",  0b1, 0b0, 0b1>;
 defm MVE_VQRDMLSDH  : MVE_VQxDMLxDH_multi<"vqrdmlsdh",  0b0, 0b1, 0b1>;
 defm MVE_VQRDMLSDHX : MVE_VQxDMLxDH_multi<"vqrdmlsdhx", 0b1, 0b1, 0b1>;
 
-class MVE_VCMUL<string iname, string suffix, bit size, string cstr="", list<dag> pattern=[]>
+class MVE_VCMUL<string iname, string suffix, bit size, string cstr="">
   : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                    (ins MQPR:$Qn, MQPR:$Qm, complexrotateop:$rot),
-                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qn;
   bits<2> rot;
 
@@ -3709,8 +3756,33 @@ class MVE_VCMUL<string iname, string suffix, bit size, string cstr="", list<dag>
   let Predicates = [HasMVEFloat];
 }
 
-def MVE_VCMULf16 : MVE_VCMUL<"vcmul", "f16", 0b0>;
-def MVE_VCMULf32 : MVE_VCMUL<"vcmul", "f32", 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VCMUL_m<string iname, MVEVectorVTInfo VTI,
+                       bit size, string cstr=""> {
+  def "" : MVE_VCMUL<iname, VTI.Suffix, size, cstr>;
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcmulq
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcmulq_predicated
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+  }
+}
+
+defm MVE_VCMULf16 : MVE_VCMUL_m<"vcmul", MVE_v8f16, 0b0>;
+defm MVE_VCMULf32 : MVE_VCMUL_m<"vcmul", MVE_v4f32, 0b1, "@earlyclobber $Qd">;
 
 class MVE_VMULL<string iname, string suffix, bit bit_28, bits<2> bits_21_20,
                 bit T, string cstr, list<dag> pattern=[]>
@@ -3938,10 +4010,10 @@ defm MVE_VCVTf32f16bh : MVE_VCVT_h2f_m<"vcvtb", 0b0>;
 defm MVE_VCVTf32f16th : MVE_VCVT_h2f_m<"vcvtt", 0b1>;
 
 class MVE_VxCADD<string iname, string suffix, bits<2> size, bit halve,
-                 string cstr="", list<dag> pattern=[]>
+                 string cstr="">
   : MVE_qDest_qSrc<iname, suffix, (outs MQPR:$Qd),
                    (ins MQPR:$Qn, MQPR:$Qm, complexrotateopodd:$rot),
-                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, pattern> {
+                   "$Qd, $Qn, $Qm, $rot", vpred_r, cstr, []> {
   bits<4> Qn;
   bit rot;
 
@@ -3955,13 +4027,37 @@ class MVE_VxCADD<string iname, string suffix, bits<2> size, bit halve,
   let Inst{0} = 0b0;
 }
 
-def MVE_VCADDi8   : MVE_VxCADD<"vcadd", "i8", 0b00, 0b1>;
-def MVE_VCADDi16  : MVE_VxCADD<"vcadd", "i16", 0b01, 0b1>;
-def MVE_VCADDi32  : MVE_VxCADD<"vcadd", "i32", 0b10, 0b1, "@earlyclobber $Qd">;
+multiclass MVE_VxCADD_m<string iname, MVEVectorVTInfo VTI,
+                        bit halve, string cstr=""> {
+  def "" : MVE_VxCADD<iname, VTI.Suffix, VTI.Size, halve, cstr>;
+
+  let Predicates = [HasMVEInt] in {
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq halve,
+                            imm:$rot, (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot))>;
+
+    def : Pat<(VTI.Vec (int_arm_mve_vcaddq_predicated halve,
+                            imm:$rot, (VTI.Vec MQPR:$inactive),
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            (VTI.Pred VCCR:$mask))),
+              (VTI.Vec (!cast<Instruction>(NAME)
+                            (VTI.Vec MQPR:$Qn), (VTI.Vec MQPR:$Qm),
+                            imm:$rot,
+                            ARMVCCThen, (VTI.Pred VCCR:$mask),
+                            (VTI.Vec MQPR:$inactive)))>;
+  }
+}
+
+defm MVE_VCADDi8   : MVE_VxCADD_m<"vcadd", MVE_v16i8, 0b1>;
+defm MVE_VCADDi16  : MVE_VxCADD_m<"vcadd", MVE_v8i16, 0b1>;
+defm MVE_VCADDi32  : MVE_VxCADD_m<"vcadd", MVE_v4i32, 0b1, "@earlyclobber $Qd">;
 
-def MVE_VHCADDs8  : MVE_VxCADD<"vhcadd", "s8", 0b00, 0b0>;
-def MVE_VHCADDs16 : MVE_VxCADD<"vhcadd", "s16", 0b01, 0b0>;
-def MVE_VHCADDs32 : MVE_VxCADD<"vhcadd", "s32", 0b10, 0b0, "@earlyclobber $Qd">;
+defm MVE_VHCADDs8  : MVE_VxCADD_m<"vhcadd", MVE_v16s8, 0b0>;
+defm MVE_VHCADDs16 : MVE_VxCADD_m<"vhcadd", MVE_v8s16, 0b0>;
+defm MVE_VHCADDs32 : MVE_VxCADD_m<"vhcadd", MVE_v4s32, 0b0, "@earlyclobber $Qd">;
 
 class MVE_VADCSBC<string iname, bit I, bit subtract,
                   dag carryin, list<dag> pattern=[]>

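For the predicated forms, each new pattern above appends ARMVCCThen, $mask and
(for the vpred_r instructions) $inactive to the instruction operands. Below is a
hedged IR sketch of what these patterns match, modelled on the tests further
down (the declare lines and names are illustrative):

declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32)
declare <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32, i32, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i1>)

define arm_aapcs_vfpcc <4 x i32> @vcadd_pred_sketch(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i16 %p) {
entry:
  %0 = zext i16 %p to i32
  %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
  ; selected roughly as a VPT block: vpst, then vcaddt.i32 ..., #90;
  ; lanes with a false predicate keep the corresponding lane of %inactive
  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
  ret <4 x i32> %2
}

Passing undef as the inactive operand yields the don't-care semantics of the
user-facing _x intrinsics, which is exactly what the _x tests below do.
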
diff  --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
index 34fbfa4d7a47..9bb24fc61cce 100644
--- a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcaddq.ll
@@ -23,7 +23,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_u8(<16 x i8> %a, <16 x i8> %
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -33,7 +33,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_u16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -44,7 +44,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_u32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -54,7 +54,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_s8(<16 x i8> %a, <16 x i8> %
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -64,7 +64,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_s16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -75,7 +75,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_s32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -85,7 +85,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot90_f16(<8 x half> %a, <8 x hal
 ; CHECK-NEXT:    vcadd.f16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 0, <8 x half> %a, <8 x half> %b)
+  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 0, <8 x half> %a, <8 x half> %b)
   ret <8 x half> %0
 }
 
@@ -96,7 +96,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot90_f32(<4 x float> %a, <4 x f
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 0, <4 x float> %a, <4 x float> %b)
+  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 0, <4 x float> %a, <4 x float> %b)
   ret <4 x float> %0
 }
 
@@ -106,7 +106,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_u8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -116,7 +116,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_u16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -127,7 +127,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_u32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -137,7 +137,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_s8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vcadd.i8 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -147,7 +147,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_s16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vcadd.i16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -158,7 +158,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_s32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -168,7 +168,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot270_f16(<8 x half> %a, <8 x ha
 ; CHECK-NEXT:    vcadd.f16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 0, i32 1, <8 x half> %a, <8 x half> %b)
+  %0 = call <8 x half> @llvm.arm.mve.vcaddq.v8f16(i32 1, i32 1, <8 x half> %a, <8 x half> %b)
   ret <8 x half> %0
 }
 
@@ -179,7 +179,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot270_f32(<4 x float> %a, <4 x
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 0, i32 1, <4 x float> %a, <4 x float> %b)
+  %0 = call <4 x float> @llvm.arm.mve.vcaddq.v4f32(i32 1, i32 1, <4 x float> %a, <4 x float> %b)
   ret <4 x float> %0
 }
 
@@ -193,7 +193,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_m_u8(<16 x i8> %inactive, <1
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -207,7 +207,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_m_u16(<8 x i16> %inactive, <
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -221,7 +221,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_m_u32(<4 x i32> %inactive, <
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -235,7 +235,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_m_s8(<16 x i8> %inactive, <1
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -249,7 +249,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_m_s16(<8 x i16> %inactive, <
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -263,7 +263,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_m_s32(<4 x i32> %inactive, <
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -277,7 +277,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot90_m_f16(<8 x half> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -291,7 +291,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot90_m_f32(<4 x float> %inactiv
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -305,7 +305,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_m_u8(<16 x i8> %inactive, <
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -319,7 +319,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_m_u16(<8 x i16> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -333,7 +333,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_m_u32(<4 x i32> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -347,7 +347,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_m_s8(<16 x i8> %inactive, <
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -361,7 +361,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_m_s16(<8 x i16> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -375,7 +375,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_m_s32(<4 x i32> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -389,7 +389,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot270_m_f16(<8 x half> %inactive
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> %inactive, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -403,7 +403,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot270_m_f32(<4 x float> %inacti
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> %inactive, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -417,7 +417,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_x_u8(<16 x i8> %a, <16 x i8>
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -431,7 +431,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_x_u16(<8 x i16> %a, <8 x i16
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -446,7 +446,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_x_u32(<4 x i32> %a, <4 x i32
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -460,7 +460,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot90_x_s8(<16 x i8> %a, <16 x i8>
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -474,7 +474,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot90_x_s16(<8 x i16> %a, <8 x i16
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -489,7 +489,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot90_x_s32(<4 x i32> %a, <4 x i32
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -503,7 +503,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot90_x_f16(<8 x half> %a, <8 x h
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 0, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 0, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -518,7 +518,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot90_x_f32(<4 x float> %a, <4 x
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 0, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 0, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -532,7 +532,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_x_u8(<16 x i8> %a, <16 x i8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -546,7 +546,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_x_u16(<8 x i16> %a, <8 x i1
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -561,7 +561,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_x_u32(<4 x i32> %a, <4 x i3
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -575,7 +575,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vcaddq_rot270_x_s8(<16 x i8> %a, <16 x i8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -589,7 +589,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vcaddq_rot270_x_s16(<8 x i16> %a, <8 x i1
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -604,7 +604,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vcaddq_rot270_x_s32(<4 x i32> %a, <4 x i3
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -618,7 +618,7 @@ define arm_aapcs_vfpcc <8 x half> @test_vcaddq_rot270_x_f16(<8 x half> %a, <8 x
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 0, i32 1, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
+  %2 = call <8 x half> @llvm.arm.mve.vcaddq.predicated.v8f16.v8i1(i32 1, i32 1, <8 x half> undef, <8 x half> %a, <8 x half> %b, <8 x i1> %1)
   ret <8 x half> %2
 }
 
@@ -633,7 +633,7 @@ define arm_aapcs_vfpcc <4 x float> @test_vcaddq_rot270_x_f32(<4 x float> %a, <4
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 0, i32 1, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
+  %2 = call <4 x float> @llvm.arm.mve.vcaddq.predicated.v4f32.v4i1(i32 1, i32 1, <4 x float> undef, <4 x float> %a, <4 x float> %b, <4 x i1> %1)
   ret <4 x float> %2
 }
 
@@ -643,7 +643,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot90_s8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vhcadd.s8 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 0, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 0, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -653,7 +653,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot90_s16(<8 x i16> %a, <8 x i16>
 ; CHECK-NEXT:    vhcadd.s16 q0, q0, q1, #90
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 0, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 0, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -664,7 +664,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot90_s32(<4 x i32> %a, <4 x i32>
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 0, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 0, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -674,7 +674,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot270_s8(<16 x i8> %a, <16 x i8>
 ; CHECK-NEXT:    vhcadd.s8 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 1, i32 1, <16 x i8> %a, <16 x i8> %b)
+  %0 = call <16 x i8> @llvm.arm.mve.vcaddq.v16i8(i32 0, i32 1, <16 x i8> %a, <16 x i8> %b)
   ret <16 x i8> %0
 }
 
@@ -684,7 +684,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot270_s16(<8 x i16> %a, <8 x i16
 ; CHECK-NEXT:    vhcadd.s16 q0, q0, q1, #270
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 1, i32 1, <8 x i16> %a, <8 x i16> %b)
+  %0 = call <8 x i16> @llvm.arm.mve.vcaddq.v8i16(i32 0, i32 1, <8 x i16> %a, <8 x i16> %b)
   ret <8 x i16> %0
 }
 
@@ -695,7 +695,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot270_s32(<4 x i32> %a, <4 x i32
 ; CHECK-NEXT:    vmov q0, q2
 ; CHECK-NEXT:    bx lr
 entry:
-  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 1, i32 1, <4 x i32> %a, <4 x i32> %b)
+  %0 = call <4 x i32> @llvm.arm.mve.vcaddq.v4i32(i32 0, i32 1, <4 x i32> %a, <4 x i32> %b)
   ret <4 x i32> %0
 }
 
@@ -709,7 +709,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot90_x_s8(<16 x i8> %a, <16 x i8
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -723,7 +723,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot90_x_s16(<8 x i16> %a, <8 x i1
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -738,7 +738,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot90_x_s32(<4 x i32> %a, <4 x i3
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -752,7 +752,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot270_x_s8(<16 x i8> %a, <16 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> undef, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -766,7 +766,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot270_x_s16(<8 x i16> %a, <8 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> undef, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -781,7 +781,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot270_x_s32(<4 x i32> %a, <4 x i
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> undef, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -795,7 +795,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot90_m_s8(<16 x i8> %inactive, <
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 0, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -809,7 +809,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot90_m_s16(<8 x i16> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 0, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -823,7 +823,7 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot90_m_s32(<4 x i32> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 0, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
 
@@ -837,7 +837,7 @@ define arm_aapcs_vfpcc <16 x i8> @test_vhcaddq_rot270_m_s8(<16 x i8> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0)
-  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 1, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
+  %2 = call <16 x i8> @llvm.arm.mve.vcaddq.predicated.v16i8.v16i1(i32 0, i32 1, <16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, <16 x i1> %1)
   ret <16 x i8> %2
 }
 
@@ -851,7 +851,7 @@ define arm_aapcs_vfpcc <8 x i16> @test_vhcaddq_rot270_m_s16(<8 x i16> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0)
-  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 1, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
+  %2 = call <8 x i16> @llvm.arm.mve.vcaddq.predicated.v8i16.v8i1(i32 0, i32 1, <8 x i16> %inactive, <8 x i16> %a, <8 x i16> %b, <8 x i1> %1)
   ret <8 x i16> %2
 }
 
@@ -865,6 +865,6 @@ define arm_aapcs_vfpcc <4 x i32> @test_vhcaddq_rot270_m_s32(<4 x i32> %inactive,
 entry:
   %0 = zext i16 %p to i32
   %1 = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0)
-  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 1, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
+  %2 = call <4 x i32> @llvm.arm.mve.vcaddq.predicated.v4i32.v4i1(i32 0, i32 1, <4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, <4 x i1> %1)
   ret <4 x i32> %2
 }
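
The vcmulq and vcmlaq intrinsics are not exercised by this test file; their
rotation operand can be read off the removed GetCMulRotation helper and the
complexrotateop operand class above: an immediate in [0, 3] standing for #0,
#90, #180 and #270. A sketch with an inferred declaration (assumed here, not
taken from this patch):

declare <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32, <4 x float>, <4 x float>)

define arm_aapcs_vfpcc <4 x float> @vcmul_rot_sketch(<4 x float> %a, <4 x float> %b) {
entry:
  ; rotation value 2 should select vcmul.f32 ..., #180
  %0 = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 2, <4 x float> %a, <4 x float> %b)
  ret <4 x float> %0
}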