[clang] 03f419f - [SveEmitter] IsInsertOp1SVALL and builtins for svqdec[bhwd] and svqinc[bhwd]

Sander de Smalen via cfe-commits cfe-commits at lists.llvm.org
Mon Apr 27 03:46:02 PDT 2020


Author: Sander de Smalen
Date: 2020-04-27T11:45:10+01:00
New Revision: 03f419f3eb0c426a0a555be9abf7255a89b131cd

URL: https://github.com/llvm/llvm-project/commit/03f419f3eb0c426a0a555be9abf7255a89b131cd
DIFF: https://github.com/llvm/llvm-project/commit/03f419f3eb0c426a0a555be9abf7255a89b131cd.diff

LOG: [SveEmitter] IsInsertOp1SVALL and builtins for svqdec[bhwd] and svqinc[bhwd]

Some ACLE builtins leave out the argument to specify the predicate
pattern, which is expected to be expanded to an SV_ALL pattern.

This patch adds the flag IsInsertOp1SVALL to insert SV_ALL as the
second operand.

Reviewers: efriedma, SjoerdMeijer

Reviewed By: SjoerdMeijer

Tags: #clang

Differential Revision: https://reviews.llvm.org/D78401

Added: 
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecb.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecd.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecw.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincb.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincd.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qinch.c
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincw.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecb.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecd.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecw.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincb.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincd.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qinch.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincw.c

Modified: 
    clang/include/clang/Basic/TargetBuiltins.h
    clang/include/clang/Basic/arm_sve.td
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdech.c
    clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdech.c
    clang/utils/TableGen/SveEmitter.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
index 8d25dedfef7f..0a06ba3e5ecc 100644
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -241,6 +241,7 @@ namespace clang {
     bool isPrefetch() const { return Flags & IsPrefetch; }
     bool isReverseCompare() const { return Flags & ReverseCompare; }
     bool isAppendSVALL() const { return Flags & IsAppendSVALL; }
+    bool isInsertOp1SVALL() const { return Flags & IsInsertOp1SVALL; }
 
     uint64_t getBits() const { return Flags; }
     bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }

diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index af6c971000f4..a5cacd2103a8 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -64,6 +64,7 @@
 // d: default
 // c: const pointer type
 // P: predicate type
+// s: scalar of element type
 // a: scalar of element type (splat to vector type)
 // e: 1/2 width unsigned elements, 2x element count
 // h: 1/2 width elements, 2x element count
@@ -182,6 +183,7 @@ def IsOverloadCvt             : FlagType<0x00800000>; // Use {typeof(operand0),
 def OverloadKindMask          : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
 def IsByteIndexed             : FlagType<0x01000000>;
 def IsAppendSVALL             : FlagType<0x02000000>; // Appends SV_ALL as the last operand.
+def IsInsertOp1SVALL          : FlagType<0x04000000>; // Inserts SV_ALL as the second operand.
 def IsPrefetch                : FlagType<0x08000000>; // Contiguous prefetches.
 def ReverseCompare            : FlagType<0x20000000>; // Compare operands must be swapped.
 
@@ -827,11 +829,6 @@ def SVCVTXNT_F32    : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch6
 def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi",  "hfd", MergeOp1,  "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
 def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1,  "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
 
-////////////////////////////////////////////////////////////////////////////////
-// Saturating scalar arithmetic
-def SVQDECH_S : SInst<"svqdech_pat[_{d}]",   "ddIi", "s", MergeNone, "aarch64_sve_sqdech", [], [ImmCheck<2, ImmCheck1_16>]>;
-def SVQDECH_U : SInst<"svqdech_pat[_{d}]",   "ddIi", "Us", MergeNone, "aarch64_sve_uqdech", [], [ImmCheck<2, ImmCheck1_16>]>;
-
 
 ////////////////////////////////////////////////////////////////////////////////
 // Predicate creation
@@ -853,6 +850,55 @@ def SVCNTB : SInst<"svcntb", "n", "", MergeNone, "aarch64_sve_cntb", [IsAppendSV
 def SVCNTH : SInst<"svcnth", "n", "", MergeNone, "aarch64_sve_cnth", [IsAppendSVALL, IsOverloadNone]>;
 def SVCNTW : SInst<"svcntw", "n", "", MergeNone, "aarch64_sve_cntw", [IsAppendSVALL, IsOverloadNone]>;
 def SVCNTD : SInst<"svcntd", "n", "", MergeNone, "aarch64_sve_cntd", [IsAppendSVALL, IsOverloadNone]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Saturating scalar arithmetic
+
+class sat_type<string u, string t> { string U = u; string T = t; }
+def SignedByte         : sat_type<"",  "c">;
+def SignedHalf         : sat_type<"",  "s">;
+def SignedWord         : sat_type<"",  "i">;
+def SignedDoubleWord   : sat_type<"",  "l">;
+def UnsignedByte       : sat_type<"U", "Uc">;
+def UnsignedHalf       : sat_type<"U", "Us">;
+def UnsignedWord       : sat_type<"U", "Ui">;
+def UnsignedDoubleWord : sat_type<"U", "Ul">;
+
+multiclass SInst_SAT1<string name, string intrinsic, sat_type type> {
+  def _N32     : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+  def _N64     : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+  def _N32_ALL : SInst<name # "[_n_{d}]",     "ssi",  type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+  def _N64_ALL : SInst<name # "[_n_{d}]",     "ssi",  type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+}
+
+multiclass SInst_SAT2<string name, string intrinsic, sat_type type> {
+  def ""       : SInst<name # "_pat[_{d}]",   "ddIi", type.T,       MergeNone, intrinsic, [], [ImmCheck<2, ImmCheck1_16>]>;
+  def _ALL     : SInst<name # "[_{d}]",       "ddi",  type.T,       MergeNone, intrinsic, [IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+
+  def _N32     : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+  def _N64     : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
+  def _N32_ALL : SInst<name # "[_n_{d}]",     "ssi",  type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+  def _N64_ALL : SInst<name # "[_n_{d}]",     "ssi",  type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+}
+
+defm SVQDECB_S : SInst_SAT1<"svqdecb", "aarch64_sve_sqdecb", SignedByte>;
+defm SVQDECB_U : SInst_SAT1<"svqdecb", "aarch64_sve_uqdecb", UnsignedByte>;
+defm SVQDECH_S : SInst_SAT2<"svqdech", "aarch64_sve_sqdech", SignedHalf>;
+defm SVQDECH_U : SInst_SAT2<"svqdech", "aarch64_sve_uqdech", UnsignedHalf>;
+defm SVQDECW_S : SInst_SAT2<"svqdecw", "aarch64_sve_sqdecw", SignedWord>;
+defm SVQDECW_U : SInst_SAT2<"svqdecw", "aarch64_sve_uqdecw", UnsignedWord>;
+defm SVQDECD_S : SInst_SAT2<"svqdecd", "aarch64_sve_sqdecd", SignedDoubleWord>;
+defm SVQDECD_U : SInst_SAT2<"svqdecd", "aarch64_sve_uqdecd", UnsignedDoubleWord>;
+
+defm SVQINCB_S : SInst_SAT1<"svqincb", "aarch64_sve_sqincb", SignedByte>;
+defm SVQINCB_U : SInst_SAT1<"svqincb", "aarch64_sve_uqincb", UnsignedByte>;
+defm SVQINCH_S : SInst_SAT2<"svqinch", "aarch64_sve_sqinch", SignedHalf>;
+defm SVQINCH_U : SInst_SAT2<"svqinch", "aarch64_sve_uqinch", UnsignedHalf>;
+defm SVQINCW_S : SInst_SAT2<"svqincw", "aarch64_sve_sqincw", SignedWord>;
+defm SVQINCW_U : SInst_SAT2<"svqincw", "aarch64_sve_uqincw", UnsignedWord>;
+defm SVQINCD_S : SInst_SAT2<"svqincd", "aarch64_sve_sqincd", SignedDoubleWord>;
+defm SVQINCD_U : SInst_SAT2<"svqincd", "aarch64_sve_uqincd", UnsignedDoubleWord>;
+
 ////////////////////////////////////////////////////////////////////////////////
 // Integer arithmetic
 def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]",  "ddqqi",  "il",   MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index ba5ea9d94023..e11791a7a7df 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -7913,6 +7913,8 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
     // pattern, which is expected to be expanded to an SV_ALL pattern.
     if (TypeFlags.isAppendSVALL())
       Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
+    if (TypeFlags.isInsertOp1SVALL())
+      Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
 
     // Predicates must match the main datatype.
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecb.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecb.c
new file mode 100644
index 000000000000..ac8d4af0880b
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecb.c
@@ -0,0 +1,83 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+int32_t test_svqdecb_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecb_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecb.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb,_n_s32,,)(op, 1);
+}
+
+int32_t test_svqdecb_n_s32_1(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecb_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecb.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb,_n_s32,,)(op, 16);
+}
+
+int64_t test_svqdecb_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqdecb_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdecb.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb,_n_s64,,)(op, 1);
+}
+
+uint32_t test_svqdecb_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqdecb_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdecb.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb,_n_u32,,)(op, 16);
+}
+
+uint64_t test_svqdecb_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqdecb_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdecb.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb,_n_u64,,)(op, 1);
+}
+
+int32_t test_svqdecb_pat_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecb_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecb.n32(i32 %op, i32 0, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_s32,,)(op, SV_POW2, 16);
+}
+
+int64_t test_svqdecb_pat_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqdecb_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdecb.n64(i64 %op, i32 1, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_s64,,)(op, SV_VL1, 1);
+}
+
+uint32_t test_svqdecb_pat_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqdecb_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdecb.n32(i32 %op, i32 2, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_u32,,)(op, SV_VL2, 16);
+}
+
+uint64_t test_svqdecb_pat_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqdecb_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdecb.n64(i64 %op, i32 3, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_u64,,)(op, SV_VL3, 1);
+}

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecd.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecd.c
new file mode 100644
index 000000000000..3adc8b73d2d3
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecd.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+int32_t test_svqdecd_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecd_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecd.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd,_n_s32,,)(op, 1);
+}
+
+int32_t test_svqdecd_n_s32_1(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecd_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecd.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd,_n_s32,,)(op, 16);
+}
+
+int64_t test_svqdecd_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdecd.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd,_n_s64,,)(op, 1);
+}
+
+uint32_t test_svqdecd_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqdecd_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdecd.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd,_n_u32,,)(op, 16);
+}
+
+uint64_t test_svqdecd_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdecd.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd,_n_u64,,)(op, 1);
+}
+
+int32_t test_svqdecd_pat_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecd_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecd.n32(i32 %op, i32 4, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_s32,,)(op, SV_VL4, 16);
+}
+
+int64_t test_svqdecd_pat_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdecd.n64(i64 %op, i32 5, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_s64,,)(op, SV_VL5, 1);
+}
+
+uint32_t test_svqdecd_pat_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqdecd_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdecd.n32(i32 %op, i32 6, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_u32,,)(op, SV_VL6, 16);
+}
+
+uint64_t test_svqdecd_pat_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdecd.n64(i64 %op, i32 7, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_u64,,)(op, SV_VL7, 1);
+}
+
+svint64_t test_svqdecd_s64(svint64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdecd.nxv2i64(<vscale x 2 x i64> %op, i32 31, i32 16)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd,_s64,,)(op, 16);
+}
+
+svuint64_t test_svqdecd_u64(svuint64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqdecd.nxv2i64(<vscale x 2 x i64> %op, i32 31, i32 1)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd,_u64,,)(op, 1);
+}
+
+svint64_t test_svqdecd_pat_s64(svint64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_pat_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdecd.nxv2i64(<vscale x 2 x i64> %op, i32 8, i32 16)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd_pat,_s64,,)(op, SV_VL8, 16);
+}
+
+svuint64_t test_svqdecd_pat_u64(svuint64_t op)
+{
+  // CHECK-LABEL: test_svqdecd_pat_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqdecd.nxv2i64(<vscale x 2 x i64> %op, i32 9, i32 1)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecd_pat,_u64,,)(op, SV_VL16, 1);
+}

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdech.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdech.c
index e70c5739c2f9..6b61d0646d5f 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdech.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdech.c
@@ -10,154 +10,106 @@
 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
 #endif
 
-svint16_t test_svqdech_pat_s16(svint16_t op)
-{
-  // CHECK-LABEL: test_svqdech_pat_s16
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdech.nxv8i16(<vscale x 8 x i16> %op, i32 0, i32 1)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return svqdech_pat_s16(op, SV_POW2, 1);
-}
-
-svint16_t test_svqdech_pat_s16_all(svint16_t op)
-{
-  // CHECK-LABEL: test_svqdech_pat_s16_all
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdech.nxv8i16(<vscale x 8 x i16> %op, i32 31, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return svqdech_pat_s16(op, SV_ALL, 16);
-}
-
-svuint16_t test_svqdech_pat_u16_pow2(svuint16_t op)
+int32_t test_svqdech_n_s32(int32_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_pow2
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 0, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_POW2, 16);
+  // CHECK-LABEL: test_svqdech_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdech.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech,_n_s32,,)(op, 1);
 }
 
-svuint16_t test_svqdech_pat_u16_vl1(svuint16_t op)
+int32_t test_svqdech_n_s32_1(int32_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl1
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 1, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL1, 16);
+  // CHECK-LABEL: test_svqdech_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdech.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech,_n_s32,,)(op, 16);
 }
 
-svuint16_t test_svqdech_pat_u16_vl2(svuint16_t op)
+int64_t test_svqdech_n_s64(int64_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl2
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 2, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL2, 16);
+  // CHECK-LABEL: test_svqdech_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdech.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech,_n_s64,,)(op, 1);
 }
 
-svuint16_t test_svqdech_pat_u16_vl3(svuint16_t op)
+uint32_t test_svqdech_n_u32(uint32_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl3
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 3, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL3, 16);
+  // CHECK-LABEL: test_svqdech_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdech.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech,_n_u32,,)(op, 16);
 }
 
-svuint16_t test_svqdech_pat_u16_vl4(svuint16_t op)
+uint64_t test_svqdech_n_u64(uint64_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl4
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 4, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL4, 16);
+  // CHECK-LABEL: test_svqdech_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdech.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech,_n_u64,,)(op, 1);
 }
 
-svuint16_t test_svqdech_pat_u16_vl5(svuint16_t op)
+int32_t test_svqdech_pat_n_s32(int32_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl5
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 5, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL5, 16);
+  // CHECK-LABEL: test_svqdech_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdech.n32(i32 %op, i32 10, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech_pat,_n_s32,,)(op, SV_VL32, 16);
 }
 
-svuint16_t test_svqdech_pat_u16_vl6(svuint16_t op)
+int64_t test_svqdech_pat_n_s64(int64_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl6
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 6, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL6, 16);
+  // CHECK-LABEL: test_svqdech_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdech.n64(i64 %op, i32 11, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech_pat,_n_s64,,)(op, SV_VL64, 1);
 }
 
-svuint16_t test_svqdech_pat_u16_vl7(svuint16_t op)
+uint32_t test_svqdech_pat_n_u32(uint32_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl7
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 7, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL7, 16);
+  // CHECK-LABEL: test_svqdech_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdech.n32(i32 %op, i32 12, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech_pat,_n_u32,,)(op, SV_VL128, 16);
 }
 
-svuint16_t test_svqdech_pat_u16_vl8(svuint16_t op)
+uint64_t test_svqdech_pat_n_u64(uint64_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl8
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 8, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL8, 16);
+  // CHECK-LABEL: test_svqdech_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdech.n64(i64 %op, i32 13, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdech_pat,_n_u64,,)(op, SV_VL256, 1);
 }
 
-svuint16_t test_svqdech_pat_u16_vl16(svuint16_t op)
+svint16_t test_svqdech_s16(svint16_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_vl16
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 9, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL16, 16);
-}
-
-svuint16_t test_svqdech_pat_u16_vl32(svuint16_t op)
-{
-  // CHECK-LABEL: test_svqdech_pat_u16_vl32
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 10, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL32, 16);
-}
-
-svuint16_t test_svqdech_pat_u16_vl64(svuint16_t op)
-{
-  // CHECK-LABEL: test_svqdech_pat_u16_vl64
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 11, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL64, 16);
-}
-
-svuint16_t test_svqdech_pat_u16_vl128(svuint16_t op)
-{
-  // CHECK-LABEL: test_svqdech_pat_u16_vl128
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 12, i32 16)
-  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL128, 16);
-}
-
-svuint16_t test_svqdech_pat_u16_vl256(svuint16_t op)
-{
-  // CHECK-LABEL: test_svqdech_pat_u16_vl256
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 13, i32 16)
+  // CHECK-LABEL: test_svqdech_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdech.nxv8i16(<vscale x 8 x i16> %op, i32 31, i32 16)
   // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL256, 16);
+  return SVE_ACLE_FUNC(svqdech,_s16,,)(op, 16);
 }
 
-svuint16_t test_svqdech_pat_u16_mul4(svuint16_t op)
+svuint16_t test_svqdech_u16(svuint16_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_mul4
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 29, i32 16)
+  // CHECK-LABEL: test_svqdech_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 31, i32 1)
   // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_MUL4, 16);
+  return SVE_ACLE_FUNC(svqdech,_u16,,)(op, 1);
 }
 
-svuint16_t test_svqdech_pat_u16_mul3(svuint16_t op)
+svint16_t test_svqdech_pat_s16(svint16_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_mul3
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 30, i32 16)
+  // CHECK-LABEL: test_svqdech_pat_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdech.nxv8i16(<vscale x 8 x i16> %op, i32 29, i32 16)
   // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_MUL3, 16);
+  return SVE_ACLE_FUNC(svqdech_pat,_s16,,)(op, SV_MUL4, 16);
 }
 
-svuint16_t test_svqdech_pat_u16_all(svuint16_t op)
+svuint16_t test_svqdech_pat_u16(svuint16_t op)
 {
-  // CHECK-LABEL: test_svqdech_pat_u16_all
-  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 31, i32 16)
+  // CHECK-LABEL: test_svqdech_pat_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqdech.nxv8i16(<vscale x 8 x i16> %op, i32 30, i32 1)
   // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
-  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_ALL, 16);
+  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_MUL3, 1);
 }

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecw.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecw.c
new file mode 100644
index 000000000000..ec68a36ac110
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qdecw.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+int32_t test_svqdecw_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecw.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw,_n_s32,,)(op, 1);
+}
+
+int32_t test_svqdecw_n_s32_1(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecw.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw,_n_s32,,)(op, 16);
+}
+
+int64_t test_svqdecw_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqdecw_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdecw.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw,_n_s64,,)(op, 1);
+}
+
+uint32_t test_svqdecw_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdecw.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw,_n_u32,,)(op, 16);
+}
+
+uint64_t test_svqdecw_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqdecw_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdecw.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw,_n_u64,,)(op, 1);
+}
+
+int32_t test_svqdecw_pat_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqdecw.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_s32,,)(op, SV_ALL, 16);
+}
+
+int64_t test_svqdecw_pat_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqdecw_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqdecw.n64(i64 %op, i32 0, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_s64,,)(op, SV_POW2, 1);
+}
+
+uint32_t test_svqdecw_pat_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqdecw.n32(i32 %op, i32 1, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_u32,,)(op, SV_VL1, 16);
+}
+
+uint64_t test_svqdecw_pat_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqdecw_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqdecw.n64(i64 %op, i32 2, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_u64,,)(op, SV_VL2, 1);
+}
+
+svint32_t test_svqdecw_s32(svint32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdecw.nxv4i32(<vscale x 4 x i32> %op, i32 31, i32 16)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw,_s32,,)(op, 16);
+}
+
+svuint32_t test_svqdecw_u32(svuint32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqdecw.nxv4i32(<vscale x 4 x i32> %op, i32 31, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw,_u32,,)(op, 1);
+}
+
+svint32_t test_svqdecw_pat_s32(svint32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_pat_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdecw.nxv4i32(<vscale x 4 x i32> %op, i32 3, i32 16)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw_pat,_s32,,)(op, SV_VL3, 16);
+}
+
+svuint32_t test_svqdecw_pat_u32(svuint32_t op)
+{
+  // CHECK-LABEL: test_svqdecw_pat_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqdecw.nxv4i32(<vscale x 4 x i32> %op, i32 4, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqdecw_pat,_u32,,)(op, SV_VL4, 1);
+}

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincb.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincb.c
new file mode 100644
index 000000000000..db58ac82203c
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincb.c
@@ -0,0 +1,83 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+int32_t test_svqincb_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqincb_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincb.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb,_n_s32,,)(op, 1);
+}
+
+int32_t test_svqincb_n_s32_1(int32_t op)
+{
+  // CHECK-LABEL: test_svqincb_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincb.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb,_n_s32,,)(op, 16);
+}
+
+int64_t test_svqincb_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqincb_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqincb.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb,_n_s64,,)(op, 1);
+}
+
+uint32_t test_svqincb_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqincb_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqincb.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb,_n_u32,,)(op, 16);
+}
+
+uint64_t test_svqincb_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqincb_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqincb.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb,_n_u64,,)(op, 1);
+}
+
+int32_t test_svqincb_pat_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqincb_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincb.n32(i32 %op, i32 5, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb_pat,_n_s32,,)(op, SV_VL5, 16);
+}
+
+int64_t test_svqincb_pat_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqincb_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqincb.n64(i64 %op, i32 6, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb_pat,_n_s64,,)(op, SV_VL6, 1);
+}
+
+uint32_t test_svqincb_pat_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqincb_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqincb.n32(i32 %op, i32 7, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb_pat,_n_u32,,)(op, SV_VL7, 16);
+}
+
+uint64_t test_svqincb_pat_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqincb_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqincb.n64(i64 %op, i32 8, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincb_pat,_n_u64,,)(op, SV_VL8, 1);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincd.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincd.c
new file mode 100644
index 000000000000..17590971e530
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincd.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+int32_t test_svqincd_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqincd_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincd.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd,_n_s32,,)(op, 1);
+}
+
+int32_t test_svqincd_n_s32_1(int32_t op)
+{
+  // CHECK-LABEL: test_svqincd_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincd.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd,_n_s32,,)(op, 16);
+}
+
+int64_t test_svqincd_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqincd_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqincd.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd,_n_s64,,)(op, 1);
+}
+
+uint32_t test_svqincd_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqincd_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqincd.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd,_n_u32,,)(op, 16);
+}
+
+uint64_t test_svqincd_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqincd_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqincd.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd,_n_u64,,)(op, 1);
+}
+
+int32_t test_svqincd_pat_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqincd_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincd.n32(i32 %op, i32 9, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd_pat,_n_s32,,)(op, SV_VL16, 16);
+}
+
+int64_t test_svqincd_pat_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqincd_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqincd.n64(i64 %op, i32 10, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd_pat,_n_s64,,)(op, SV_VL32, 1);
+}
+
+uint32_t test_svqincd_pat_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqincd_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqincd.n32(i32 %op, i32 11, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd_pat,_n_u32,,)(op, SV_VL64, 16);
+}
+
+uint64_t test_svqincd_pat_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqincd_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqincd.n64(i64 %op, i32 12, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd_pat,_n_u64,,)(op, SV_VL128, 1);
+}
+
+svint64_t test_svqincd_s64(svint64_t op)
+{
+  // CHECK-LABEL: test_svqincd_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqincd.nxv2i64(<vscale x 2 x i64> %op, i32 31, i32 16)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd,_s64,,)(op, 16);
+}
+
+svuint64_t test_svqincd_u64(svuint64_t op)
+{
+  // CHECK-LABEL: test_svqincd_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqincd.nxv2i64(<vscale x 2 x i64> %op, i32 31, i32 1)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd,_u64,,)(op, 1);
+}
+
+svint64_t test_svqincd_pat_s64(svint64_t op)
+{
+  // CHECK-LABEL: test_svqincd_pat_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqincd.nxv2i64(<vscale x 2 x i64> %op, i32 13, i32 16)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd_pat,_s64,,)(op, SV_VL256, 16);
+}
+
+svuint64_t test_svqincd_pat_u64(svuint64_t op)
+{
+  // CHECK-LABEL: test_svqincd_pat_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqincd.nxv2i64(<vscale x 2 x i64> %op, i32 29, i32 1)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincd_pat,_u64,,)(op, SV_MUL4, 1);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qinch.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qinch.c
new file mode 100644
index 000000000000..f1c34e8c420b
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qinch.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+int32_t test_svqinch_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqinch_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqinch.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch,_n_s32,,)(op, 1);
+}
+
+int32_t test_svqinch_n_s32_1(int32_t op)
+{
+  // CHECK-LABEL: test_svqinch_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqinch.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch,_n_s32,,)(op, 16);
+}
+
+int64_t test_svqinch_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqinch_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqinch.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch,_n_s64,,)(op, 1);
+}
+
+uint32_t test_svqinch_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqinch_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqinch.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch,_n_u32,,)(op, 16);
+}
+
+uint64_t test_svqinch_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqinch_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqinch.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch,_n_u64,,)(op, 1);
+}
+
+int32_t test_svqinch_pat_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqinch_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqinch.n32(i32 %op, i32 30, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch_pat,_n_s32,,)(op, SV_MUL3, 16);
+}
+
+int64_t test_svqinch_pat_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqinch_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqinch.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch_pat,_n_s64,,)(op, SV_ALL, 1);
+}
+
+uint32_t test_svqinch_pat_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqinch_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqinch.n32(i32 %op, i32 0, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch_pat,_n_u32,,)(op, SV_POW2, 16);
+}
+
+uint64_t test_svqinch_pat_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqinch_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqinch.n64(i64 %op, i32 1, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch_pat,_n_u64,,)(op, SV_VL1, 1);
+}
+
+svint16_t test_svqinch_s16(svint16_t op)
+{
+  // CHECK-LABEL: test_svqinch_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqinch.nxv8i16(<vscale x 8 x i16> %op, i32 31, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch,_s16,,)(op, 16);
+}
+
+svuint16_t test_svqinch_u16(svuint16_t op)
+{
+  // CHECK-LABEL: test_svqinch_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqinch.nxv8i16(<vscale x 8 x i16> %op, i32 31, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch,_u16,,)(op, 1);
+}
+
+svint16_t test_svqinch_pat_s16(svint16_t op)
+{
+  // CHECK-LABEL: test_svqinch_pat_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqinch.nxv8i16(<vscale x 8 x i16> %op, i32 2, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch_pat,_s16,,)(op, SV_VL2, 16);
+}
+
+svuint16_t test_svqinch_pat_u16(svuint16_t op)
+{
+  // CHECK-LABEL: test_svqinch_pat_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqinch.nxv8i16(<vscale x 8 x i16> %op, i32 3, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqinch_pat,_u16,,)(op, SV_VL3, 1);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincw.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincw.c
new file mode 100644
index 000000000000..74202c0cc214
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_qincw.c
@@ -0,0 +1,115 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+int32_t test_svqincw_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqincw_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincw.n32(i32 %op, i32 31, i32 1)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw,_n_s32,,)(op, 1);
+}
+
+int32_t test_svqincw_n_s32_1(int32_t op)
+{
+  // CHECK-LABEL: test_svqincw_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincw.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw,_n_s32,,)(op, 16);
+}
+
+int64_t test_svqincw_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqincw_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqincw.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw,_n_s64,,)(op, 1);
+}
+
+uint32_t test_svqincw_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqincw_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqincw.n32(i32 %op, i32 31, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw,_n_u32,,)(op, 16);
+}
+
+uint64_t test_svqincw_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqincw_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqincw.n64(i64 %op, i32 31, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw,_n_u64,,)(op, 1);
+}
+
+int32_t test_svqincw_pat_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svqincw_pat_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.sqincw.n32(i32 %op, i32 4, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw_pat,_n_s32,,)(op, SV_VL4, 16);
+}
+
+int64_t test_svqincw_pat_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svqincw_pat_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.sqincw.n64(i64 %op, i32 5, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw_pat,_n_s64,,)(op, SV_VL5, 1);
+}
+
+uint32_t test_svqincw_pat_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svqincw_pat_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call i32 @llvm.aarch64.sve.uqincw.n32(i32 %op, i32 6, i32 16)
+  // CHECK: ret i32 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw_pat,_n_u32,,)(op, SV_VL6, 16);
+}
+
+uint64_t test_svqincw_pat_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svqincw_pat_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.uqincw.n64(i64 %op, i32 7, i32 1)
+  // CHECK: ret i64 %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw_pat,_n_u64,,)(op, SV_VL7, 1);
+}
+
+svint32_t test_svqincw_s32(svint32_t op)
+{
+  // CHECK-LABEL: test_svqincw_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqincw.nxv4i32(<vscale x 4 x i32> %op, i32 31, i32 16)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw,_s32,,)(op, 16);
+}
+
+svuint32_t test_svqincw_u32(svuint32_t op)
+{
+  // CHECK-LABEL: test_svqincw_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqincw.nxv4i32(<vscale x 4 x i32> %op, i32 31, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw,_u32,,)(op, 1);
+}
+
+svint32_t test_svqincw_pat_s32(svint32_t op)
+{
+  // CHECK-LABEL: test_svqincw_pat_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqincw.nxv4i32(<vscale x 4 x i32> %op, i32 8, i32 16)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw_pat,_s32,,)(op, SV_VL8, 16);
+}
+
+svuint32_t test_svqincw_pat_u32(svuint32_t op)
+{
+  // CHECK-LABEL: test_svqincw_pat_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqincw.nxv4i32(<vscale x 4 x i32> %op, i32 9, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svqincw_pat,_u32,,)(op, SV_VL16, 1);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecb.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecb.c
new file mode 100644
index 000000000000..b8ac4ac5aa6a
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecb.c
@@ -0,0 +1,107 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+int32_t test_svqdecb_n_s32(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqdecb_n_s32_1(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqdecb_n_s64(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqdecb_n_s64_1(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqdecb_n_u32(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqdecb_n_u32_1(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqdecb_n_u64(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqdecb_n_u64_1(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqdecb_pat_n_s32(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqdecb_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqdecb_pat_n_s64(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqdecb_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqdecb_pat_n_u32(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqdecb_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqdecb_pat_n_u64(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqdecb_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecb_pat,_n_u64,,)(op, SV_VL7, 17);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecd.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecd.c
new file mode 100644
index 000000000000..db380e7dc83c
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecd.c
@@ -0,0 +1,155 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+int32_t test_svqdecd_n_s32(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqdecd_n_s32_1(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqdecd_n_s64(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqdecd_n_s64_1(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqdecd_n_u32(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqdecd_n_u32_1(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqdecd_n_u64(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqdecd_n_u64_1(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqdecd_pat_n_s32(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqdecd_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqdecd_pat_n_s64(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqdecd_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqdecd_pat_n_u32(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqdecd_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqdecd_pat_n_u64(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqdecd_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_n_u64,,)(op, SV_VL7, 17);
+}
+
+svint64_t test_svqdecd_s64(svint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_s64,,)(op, 0);
+}
+
+svint64_t test_svqdecd_s64_1(svint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_s64,,)(op, 17);
+}
+
+svuint64_t test_svqdecd_u64(svuint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_u64,,)(op, 0);
+}
+
+svuint64_t test_svqdecd_u64_1(svuint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd,_u64,,)(op, 17);
+}
+
+svint64_t test_svqdecd_pat_s64(svint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_s64,,)(op, SV_VL8, 0);
+}
+
+svint64_t test_svqdecd_pat_s64_1(svint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_s64,,)(op, SV_VL16, 17);
+}
+
+svuint64_t test_svqdecd_pat_u64(svuint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_u64,,)(op, SV_VL32, 0);
+}
+
+svuint64_t test_svqdecd_pat_u64_1(svuint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecd_pat,_u64,,)(op, SV_VL64, 17);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdech.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdech.c
index 71ecd73fa403..daa87e1c2df7 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdech.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdech.c
@@ -10,26 +10,146 @@
 
 #include <arm_sve.h>
 
+int32_t test_svqdech_n_s32(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqdech_n_s32_1(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqdech_n_s64(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqdech_n_s64_1(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqdech_n_u32(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqdech_n_u32_1(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqdech_n_u64(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqdech_n_u64_1(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqdech_pat_n_s32(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqdech_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqdech_pat_n_s64(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqdech_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqdech_pat_n_u32(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqdech_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqdech_pat_n_u64(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqdech_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_n_u64,,)(op, SV_VL7, 17);
+}
+
+svint16_t test_svqdech_s16(svint16_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_s16,,)(op, 0);
+}
+
+svint16_t test_svqdech_s16_1(svint16_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_s16,,)(op, 17);
+}
+
+svuint16_t test_svqdech_u16(svuint16_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_u16,,)(op, 0);
+}
+
+svuint16_t test_svqdech_u16_1(svuint16_t op)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech,_u16,,)(op, 17);
+}
+
 svint16_t test_svqdech_pat_s16(svint16_t op)
 {
-  // expected-error@+1 {{argument value 0 is outside the valid range [1, 16]}}
-  return svqdech_pat_s16(op, SV_VL8, 0);
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_s16,,)(op, SV_VL8, 0);
 }
 
-svint16_t test_svqdech_pat_s16_2(svint16_t op)
+svint16_t test_svqdech_pat_s16_1(svint16_t op)
 {
-  // expected-error@+1 {{argument value 17 is outside the valid range [1, 16]}}
-  return svqdech_pat_s16(op, SV_VL16, 17);
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_s16,,)(op, SV_VL16, 17);
 }
 
 svuint16_t test_svqdech_pat_u16(svuint16_t op)
 {
-  // expected-error@+1 {{argument value 0 is outside the valid range [1, 16]}}
-  return svqdech_pat_u16(op, SV_VL32, 0);
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL32, 0);
 }
 
-svuint16_t test_svqdech_pat_u16_2(svuint16_t op)
+svuint16_t test_svqdech_pat_u16_1(svuint16_t op)
 {
-  // expected-error@+1 {{argument value 17 is outside the valid range [1, 16]}}
-  return svqdech_pat_u16(op, SV_VL64, 17);
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdech_pat,_u16,,)(op, SV_VL64, 17);
 }

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecw.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecw.c
new file mode 100644
index 000000000000..23705457131f
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qdecw.c
@@ -0,0 +1,155 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+int32_t test_svqdecw_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqdecw_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqdecw_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqdecw_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqdecw_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqdecw_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqdecw_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqdecw_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqdecw_pat_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqdecw_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqdecw_pat_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqdecw_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqdecw_pat_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqdecw_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqdecw_pat_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqdecw_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_n_u64,,)(op, SV_VL7, 17);
+}
+
+svint32_t test_svqdecw_s32(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_s32,,)(op, 0);
+}
+
+svint32_t test_svqdecw_s32_1(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_s32,,)(op, 17);
+}
+
+svuint32_t test_svqdecw_u32(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_u32,,)(op, 0);
+}
+
+svuint32_t test_svqdecw_u32_1(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw,_u32,,)(op, 17);
+}
+
+svint32_t test_svqdecw_pat_s32(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_s32,,)(op, SV_VL8, 0);
+}
+
+svint32_t test_svqdecw_pat_s32_1(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_s32,,)(op, SV_VL16, 17);
+}
+
+svuint32_t test_svqdecw_pat_u32(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_u32,,)(op, SV_VL32, 0);
+}
+
+svuint32_t test_svqdecw_pat_u32_1(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqdecw_pat,_u32,,)(op, SV_VL64, 17);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincb.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincb.c
new file mode 100644
index 000000000000..c4d9b473ebd5
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincb.c
@@ -0,0 +1,107 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+int32_t test_svqincb_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqincb_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqincb_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqincb_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqincb_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqincb_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqincb_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqincb_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqincb_pat_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqincb_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqincb_pat_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqincb_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqincb_pat_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqincb_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqincb_pat_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqincb_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincb_pat,_n_u64,,)(op, SV_VL7, 17);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincd.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincd.c
new file mode 100644
index 000000000000..1905778e97ac
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincd.c
@@ -0,0 +1,155 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+int32_t test_svqincd_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqincd_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqincd_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqincd_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqincd_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqincd_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqincd_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqincd_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqincd_pat_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqincd_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqincd_pat_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqincd_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqincd_pat_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqincd_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqincd_pat_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqincd_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_n_u64,,)(op, SV_VL7, 17);
+}
+
+svint64_t test_svqincd_s64(svint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_s64,,)(op, 0);
+}
+
+svint64_t test_svqincd_s64_1(svint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_s64,,)(op, 17);
+}
+
+svuint64_t test_svqincd_u64(svuint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_u64,,)(op, 0);
+}
+
+svuint64_t test_svqincd_u64_1(svuint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd,_u64,,)(op, 17);
+}
+
+svint64_t test_svqincd_pat_s64(svint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_s64,,)(op, SV_VL8, 0);
+}
+
+svint64_t test_svqincd_pat_s64_1(svint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_s64,,)(op, SV_VL16, 17);
+}
+
+svuint64_t test_svqincd_pat_u64(svuint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_u64,,)(op, SV_VL32, 0);
+}
+
+svuint64_t test_svqincd_pat_u64_1(svuint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincd_pat,_u64,,)(op, SV_VL64, 17);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qinch.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qinch.c
new file mode 100644
index 000000000000..b67c5ccdc88a
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qinch.c
@@ -0,0 +1,155 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+int32_t test_svqinch_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqinch_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqinch_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqinch_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqinch_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqinch_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqinch_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqinch_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqinch_pat_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqinch_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqinch_pat_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqinch_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqinch_pat_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqinch_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqinch_pat_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqinch_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_n_u64,,)(op, SV_VL7, 17);
+}
+
+svint16_t test_svqinch_s16(svint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_s16,,)(op, 0);
+}
+
+svint16_t test_svqinch_s16_1(svint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_s16,,)(op, 17);
+}
+
+svuint16_t test_svqinch_u16(svuint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_u16,,)(op, 0);
+}
+
+svuint16_t test_svqinch_u16_1(svuint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch,_u16,,)(op, 17);
+}
+
+svint16_t test_svqinch_pat_s16(svint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_s16,,)(op, SV_VL8, 0);
+}
+
+svint16_t test_svqinch_pat_s16_1(svint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_s16,,)(op, SV_VL16, 17);
+}
+
+svuint16_t test_svqinch_pat_u16(svuint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_u16,,)(op, SV_VL32, 0);
+}
+
+svuint16_t test_svqinch_pat_u16_1(svuint16_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqinch_pat,_u16,,)(op, SV_VL64, 17);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincw.c b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincw.c
new file mode 100644
index 000000000000..7180f8bbd164
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/negative/acle_sve_qincw.c
@@ -0,0 +1,155 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+int32_t test_svqincw_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_s32,,)(op, 0);
+}
+
+int32_t test_svqincw_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_s32,,)(op, 17);
+}
+
+int64_t test_svqincw_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_s64,,)(op, 0);
+}
+
+int64_t test_svqincw_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_s64,,)(op, 17);
+}
+
+uint32_t test_svqincw_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_u32,,)(op, 0);
+}
+
+uint32_t test_svqincw_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_u32,,)(op, 17);
+}
+
+uint64_t test_svqincw_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_u64,,)(op, 0);
+}
+
+uint64_t test_svqincw_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_n_u64,,)(op, 17);
+}
+
+int32_t test_svqincw_pat_n_s32(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_s32,,)(op, SV_POW2, 0);
+}
+
+int32_t test_svqincw_pat_n_s32_1(int32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_s32,,)(op, SV_VL1, 17);
+}
+
+int64_t test_svqincw_pat_n_s64(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_s64,,)(op, SV_VL2, 0);
+}
+
+int64_t test_svqincw_pat_n_s64_1(int64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_s64,,)(op, SV_VL3, 17);
+}
+
+uint32_t test_svqincw_pat_n_u32(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_u32,,)(op, SV_VL4, 0);
+}
+
+uint32_t test_svqincw_pat_n_u32_1(uint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_u32,,)(op, SV_VL5, 17);
+}
+
+uint64_t test_svqincw_pat_n_u64(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_u64,,)(op, SV_VL6, 0);
+}
+
+uint64_t test_svqincw_pat_n_u64_1(uint64_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_n_u64,,)(op, SV_VL7, 17);
+}
+
+svint32_t test_svqincw_s32(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_s32,,)(op, 0);
+}
+
+svint32_t test_svqincw_s32_1(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_s32,,)(op, 17);
+}
+
+svuint32_t test_svqincw_u32(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_u32,,)(op, 0);
+}
+
+svuint32_t test_svqincw_u32_1(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw,_u32,,)(op, 17);
+}
+
+svint32_t test_svqincw_pat_s32(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_s32,,)(op, SV_VL8, 0);
+}
+
+svint32_t test_svqincw_pat_s32_1(svint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_s32,,)(op, SV_VL16, 17);
+}
+
+svuint32_t test_svqincw_pat_u32(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_u32,,)(op, SV_VL32, 0);
+}
+
+svuint32_t test_svqincw_pat_u32_1(svuint32_t op)
+{
+  // expected-error-re at +1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svqincw_pat,_u32,,)(op, SV_VL64, 17);
+}

diff  --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index 9a4b3592a16e..9376cd63939f 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -505,6 +505,7 @@ void SVEType::applyModifier(char Mod) {
     Bitwidth = 16;
     ElementBitwidth = 1;
     break;
+  case 's':
   case 'a':
     Bitwidth = ElementBitwidth;
     NumVectors = 0;


        


More information about the cfe-commits mailing list