[clang] b0b658e - [SveEmitter] Add builtins for SVE2 Widening DSP operations

Sander de Smalen via cfe-commits cfe-commits at lists.llvm.org
Thu May 7 08:12:46 PDT 2020


Author: Sander de Smalen
Date: 2020-05-07T16:09:31+01:00
New Revision: b0b658e7fcf073b0e90938891b0e9b128734cb44

URL: https://github.com/llvm/llvm-project/commit/b0b658e7fcf073b0e90938891b0e9b128734cb44
DIFF: https://github.com/llvm/llvm-project/commit/b0b658e7fcf073b0e90938891b0e9b128734cb44.diff

LOG: [SveEmitter] Add builtins for SVE2 Widening DSP operations

This patch adds builtins (a short usage sketch follows the list) for:
- svabalb
- svabalt
- svabdlb
- svabdlt
- svaddlb
- svaddlt
- svaddwb
- svaddwt
- svmlalb, svmlalb_lane
- svmlalt, svmlalt_lane
- svmlslb, svmlslb_lane
- svmlslt, svmlslt_lane
- svmullb, svmullb_lane
- svmullt, svmullt_lane
- svqdmlalb, svqdmlalb_lane
- svqdmlalt, svqdmlalt_lane
- svqdmlslb, svqdmlslb_lane
- svqdmlslt, svqdmlslt_lane
- svqdmullb, svqdmullb_lane
- svqdmullt, svqdmullt_lane
- svshllb
- svshllt
- svsublb
- svsublt
- svsubwb
- svsubwt
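
A minimal usage sketch of two of the new builtins (not part of the patch; the
function names are illustrative and it assumes arm_sve.h from a toolchain with
SVE2 support and an SVE2-enabled target, e.g. -march=armv8-a+sve2):

    #include <arm_sve.h>

    // Widening multiply-accumulate: the even ("bottom") 16-bit elements of
    // a and b are multiplied and accumulated into the 32-bit accumulator acc.
    svint32_t widen_mla(svint32_t acc, svint16_t a, svint16_t b) {
      return svmlalb_s32(acc, a, b);
    }

    // Indexed form: the lane index must be a compile-time constant
    // (0-7 here, selecting a 16-bit element within each 128-bit segment of b).
    svint32_t widen_mla_lane(svint32_t acc, svint16_t a, svint16_t b) {
      return svmlalb_lane_s32(acc, a, b, 1);
    }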

Added: 
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullt.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllb.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllt.c
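
The negative tests listed above exercise the new immediate-range checks
(ImmCheckShiftLeft and ImmCheckLaneIndex in the TableGen changes below). An
illustrative sketch, assuming the 0-7 shift range of the 8-bit to 16-bit form:

    #include <arm_sve.h>

    svint16_t widen_shift(svint8_t x) {
      return svshllb_n_s16(x, 3);   // OK: shift amount is a constant in [0, 7]
      // svshllb_n_s16(x, 8);       // out of range; rejected by the immediate check
    }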

Modified: 
    clang/include/clang/Basic/arm_sve.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 4094c0b5f9c3..027f16cd5afe 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -1418,6 +1418,95 @@ def SVSQRDCMLAH_LANE_X : SInst<"svqrdcmlah_lane[_{d}]", "ddddii", "si",
                                                                                                                                   ImmCheck<4, ImmCheckComplexRotAll90>]>;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Widening DSP operations
+
+multiclass SInstWideDSPAcc<string name, string types, string intrinsic> {
+  def    : SInst<name # "[_{d}]",   "ddhh", types, MergeNone, intrinsic>;
+  def _N : SInst<name # "[_n_{d}]", "ddhR", types, MergeNone, intrinsic>;
+}
+
+multiclass SInstWideDSPLong<string name, string types, string intrinsic> {
+  def    : SInst<name # "[_{d}]",   "dhh", types, MergeNone, intrinsic>;
+  def _N : SInst<name # "[_n_{d}]", "dhR", types, MergeNone, intrinsic>;
+}
+
+multiclass SInstWideDSPWide<string name, string types, string intrinsic> {
+  def    : SInst<name # "[_{d}]",   "ddh", types, MergeNone, intrinsic>;
+  def _N : SInst<name # "[_n_{d}]", "ddR", types, MergeNone, intrinsic>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+defm SVABALB_S : SInstWideDSPAcc<"svabalb",   "sil",    "aarch64_sve_sabalb">;
+defm SVABALB_U : SInstWideDSPAcc<"svabalb",   "UsUiUl", "aarch64_sve_uabalb">;
+defm SVABALT_S : SInstWideDSPAcc<"svabalt",   "sil",    "aarch64_sve_sabalt">;
+defm SVABALT_U : SInstWideDSPAcc<"svabalt",   "UsUiUl", "aarch64_sve_uabalt">;
+defm SVMLALB_S : SInstWideDSPAcc<"svmlalb",   "sil",    "aarch64_sve_smlalb">;
+defm SVMLALB_U : SInstWideDSPAcc<"svmlalb",   "UsUiUl", "aarch64_sve_umlalb">;
+defm SVMLALT_S : SInstWideDSPAcc<"svmlalt",   "sil",    "aarch64_sve_smlalt">;
+defm SVMLALT_U : SInstWideDSPAcc<"svmlalt",   "UsUiUl", "aarch64_sve_umlalt">;
+defm SVMLSLB_S : SInstWideDSPAcc<"svmlslb",   "sil",    "aarch64_sve_smlslb">;
+defm SVMLSLB_U : SInstWideDSPAcc<"svmlslb",   "UsUiUl", "aarch64_sve_umlslb">;
+defm SVMLSLT_S : SInstWideDSPAcc<"svmlslt",   "sil",    "aarch64_sve_smlslt">;
+defm SVMLSLT_U : SInstWideDSPAcc<"svmlslt",   "UsUiUl", "aarch64_sve_umlslt">;
+defm SVQDMLALB : SInstWideDSPAcc<"svqdmlalb", "sil",    "aarch64_sve_sqdmlalb">;
+defm SVQDMLALT : SInstWideDSPAcc<"svqdmlalt", "sil",    "aarch64_sve_sqdmlalt">;
+defm SVQDMLSLB : SInstWideDSPAcc<"svqdmlslb", "sil",    "aarch64_sve_sqdmlslb">;
+defm SVQDMLSLT : SInstWideDSPAcc<"svqdmlslt", "sil",    "aarch64_sve_sqdmlslt">;
+
+defm SVABDLB_S : SInstWideDSPLong<"svabdlb",   "sil",    "aarch64_sve_sabdlb">;
+defm SVABDLB_U : SInstWideDSPLong<"svabdlb",   "UsUiUl", "aarch64_sve_uabdlb">;
+defm SVABDLT_S : SInstWideDSPLong<"svabdlt",   "sil",    "aarch64_sve_sabdlt">;
+defm SVABDLT_U : SInstWideDSPLong<"svabdlt",   "UsUiUl", "aarch64_sve_uabdlt">;
+defm SVADDLB_S : SInstWideDSPLong<"svaddlb",   "sil",    "aarch64_sve_saddlb">;
+defm SVADDLB_U : SInstWideDSPLong<"svaddlb",   "UsUiUl", "aarch64_sve_uaddlb">;
+defm SVADDLT_S : SInstWideDSPLong<"svaddlt",   "sil",    "aarch64_sve_saddlt">;
+defm SVADDLT_U : SInstWideDSPLong<"svaddlt",   "UsUiUl", "aarch64_sve_uaddlt">;
+defm SVMULLB_S : SInstWideDSPLong<"svmullb",   "sil",    "aarch64_sve_smullb">;
+defm SVMULLB_U : SInstWideDSPLong<"svmullb",   "UsUiUl", "aarch64_sve_umullb">;
+defm SVMULLT_S : SInstWideDSPLong<"svmullt",   "sil",    "aarch64_sve_smullt">;
+defm SVMULLT_U : SInstWideDSPLong<"svmullt",   "UsUiUl", "aarch64_sve_umullt">;
+defm SVQDMULLB : SInstWideDSPLong<"svqdmullb", "sil",    "aarch64_sve_sqdmullb">;
+defm SVQDMULLT : SInstWideDSPLong<"svqdmullt", "sil",    "aarch64_sve_sqdmullt">;
+defm SVSUBLB_S : SInstWideDSPLong<"svsublb",   "sil",    "aarch64_sve_ssublb">;
+defm SVSUBLB_U : SInstWideDSPLong<"svsublb",   "UsUiUl", "aarch64_sve_usublb">;
+defm SVSUBLT_S : SInstWideDSPLong<"svsublt",   "sil",    "aarch64_sve_ssublt">;
+defm SVSUBLT_U : SInstWideDSPLong<"svsublt",   "UsUiUl", "aarch64_sve_usublt">;
+
+defm SVADDWB_S : SInstWideDSPWide<"svaddwb", "sil",    "aarch64_sve_saddwb">;
+defm SVADDWB_U : SInstWideDSPWide<"svaddwb", "UsUiUl", "aarch64_sve_uaddwb">;
+defm SVADDWT_S : SInstWideDSPWide<"svaddwt", "sil",    "aarch64_sve_saddwt">;
+defm SVADDWT_U : SInstWideDSPWide<"svaddwt", "UsUiUl", "aarch64_sve_uaddwt">;
+defm SVSUBWB_S : SInstWideDSPWide<"svsubwb", "sil",    "aarch64_sve_ssubwb">;
+defm SVSUBWB_U : SInstWideDSPWide<"svsubwb", "UsUiUl", "aarch64_sve_usubwb">;
+defm SVSUBWT_S : SInstWideDSPWide<"svsubwt", "sil",    "aarch64_sve_ssubwt">;
+defm SVSUBWT_U : SInstWideDSPWide<"svsubwt", "UsUiUl", "aarch64_sve_usubwt">;
+
+def SVSHLLB_S_N : SInst<"svshllb[_n_{d}]", "dhi", "sil",    MergeNone, "aarch64_sve_sshllb", [], [ImmCheck<1, ImmCheckShiftLeft,  0>]>;
+def SVSHLLB_U_N : SInst<"svshllb[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllb", [], [ImmCheck<1, ImmCheckShiftLeft,  0>]>;
+def SVSHLLT_S_N : SInst<"svshllt[_n_{d}]", "dhi", "sil",    MergeNone, "aarch64_sve_sshllt", [], [ImmCheck<1, ImmCheckShiftLeft,  0>]>;
+def SVSHLLT_U_N : SInst<"svshllt[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllt", [], [ImmCheck<1, ImmCheckShiftLeft,  0>]>;
+
+def SVMLALB_S_LANE : SInst<"svmlalb_lane[_{d}]",   "ddhhi", "il",   MergeNone, "aarch64_sve_smlalb_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALB_U_LANE : SInst<"svmlalb_lane[_{d}]",   "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalb_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_S_LANE : SInst<"svmlalt_lane[_{d}]",   "ddhhi", "il",   MergeNone, "aarch64_sve_smlalt_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_U_LANE : SInst<"svmlalt_lane[_{d}]",   "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalt_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_S_LANE : SInst<"svmlslb_lane[_{d}]",   "ddhhi", "il",   MergeNone, "aarch64_sve_smlslb_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_U_LANE : SInst<"svmlslb_lane[_{d}]",   "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslb_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_S_LANE : SInst<"svmlslt_lane[_{d}]",   "ddhhi", "il",   MergeNone, "aarch64_sve_smlslt_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_U_LANE : SInst<"svmlslt_lane[_{d}]",   "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslt_lane",   [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMULLB_S_LANE : SInst<"svmullb_lane[_{d}]",   "dhhi",  "il",   MergeNone, "aarch64_sve_smullb_lane",   [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLB_U_LANE : SInst<"svmullb_lane[_{d}]",   "dhhi",  "UiUl", MergeNone, "aarch64_sve_umullb_lane",   [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLT_S_LANE : SInst<"svmullt_lane[_{d}]",   "dhhi",  "il",   MergeNone, "aarch64_sve_smullt_lane",   [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLT_U_LANE : SInst<"svmullt_lane[_{d}]",   "dhhi",  "UiUl", MergeNone, "aarch64_sve_umullt_lane",   [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQDMLALB_LANE : SInst<"svqdmlalb_lane[_{d}]", "ddhhi", "il",   MergeNone, "aarch64_sve_sqdmlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLALT_LANE : SInst<"svqdmlalt_lane[_{d}]", "ddhhi", "il",   MergeNone, "aarch64_sve_sqdmlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLSLB_LANE : SInst<"svqdmlslb_lane[_{d}]", "ddhhi", "il",   MergeNone, "aarch64_sve_sqdmlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLSLT_LANE : SInst<"svqdmlslt_lane[_{d}]", "ddhhi", "il",   MergeNone, "aarch64_sve_sqdmlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMULLB_LANE : SInst<"svqdmullb_lane[_{d}]", "dhhi",  "il",   MergeNone, "aarch64_sve_sqdmullb_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQDMULLT_LANE : SInst<"svqdmullt_lane[_{d}]", "dhhi",  "il",   MergeNone, "aarch64_sve_sqdmullt_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 - Non-temporal gather/scatter
 let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalb.c
new file mode 100644
index 000000000000..850a6722f645
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalb.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svabalb_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svabalb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_s16'}}
+  return SVE_ACLE_FUNC(svabalb,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svabalb_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svabalb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_s32'}}
+  return SVE_ACLE_FUNC(svabalb,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svabalb_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svabalb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_s64'}}
+  return SVE_ACLE_FUNC(svabalb,_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svabalb_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svabalb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_u16'}}
+  return SVE_ACLE_FUNC(svabalb,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svabalb_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svabalb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_u32'}}
+  return SVE_ACLE_FUNC(svabalb,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svabalb_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svabalb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_u64'}}
+  return SVE_ACLE_FUNC(svabalb,_u64,,)(op1, op2, op3);
+}
+
+svint16_t test_svabalb_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svabalb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_n_s16'}}
+  return SVE_ACLE_FUNC(svabalb,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svabalb_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svabalb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_n_s32'}}
+  return SVE_ACLE_FUNC(svabalb,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svabalb_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svabalb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_n_s64'}}
+  return SVE_ACLE_FUNC(svabalb,_n_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svabalb_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svabalb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_n_u16'}}
+  return SVE_ACLE_FUNC(svabalb,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svabalb_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svabalb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_n_u32'}}
+  return SVE_ACLE_FUNC(svabalb,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svabalb_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svabalb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalb_n_u64'}}
+  return SVE_ACLE_FUNC(svabalb,_n_u64,,)(op1, op2, op3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalt.c
new file mode 100644
index 000000000000..34a2cb915c5d
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abalt.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svabalt_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svabalt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_s16'}}
+  return SVE_ACLE_FUNC(svabalt,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svabalt_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svabalt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_s32'}}
+  return SVE_ACLE_FUNC(svabalt,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svabalt_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svabalt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_s64'}}
+  return SVE_ACLE_FUNC(svabalt,_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svabalt_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svabalt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_u16'}}
+  return SVE_ACLE_FUNC(svabalt,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svabalt_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svabalt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_u32'}}
+  return SVE_ACLE_FUNC(svabalt,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svabalt_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svabalt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_u64'}}
+  return SVE_ACLE_FUNC(svabalt,_u64,,)(op1, op2, op3);
+}
+
+svint16_t test_svabalt_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svabalt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_n_s16'}}
+  return SVE_ACLE_FUNC(svabalt,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svabalt_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svabalt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_n_s32'}}
+  return SVE_ACLE_FUNC(svabalt,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svabalt_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svabalt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_n_s64'}}
+  return SVE_ACLE_FUNC(svabalt,_n_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svabalt_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svabalt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_n_u16'}}
+  return SVE_ACLE_FUNC(svabalt,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svabalt_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svabalt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_n_u32'}}
+  return SVE_ACLE_FUNC(svabalt,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svabalt_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svabalt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabalt_n_u64'}}
+  return SVE_ACLE_FUNC(svabalt,_n_u64,,)(op1, op2, op3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlb.c
new file mode 100644
index 000000000000..2453fd99ca1e
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlb.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svabdlb_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_s16'}}
+  return SVE_ACLE_FUNC(svabdlb,_s16,,)(op1, op2);
+}
+
+svint32_t test_svabdlb_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_s32'}}
+  return SVE_ACLE_FUNC(svabdlb,_s32,,)(op1, op2);
+}
+
+svint64_t test_svabdlb_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_s64'}}
+  return SVE_ACLE_FUNC(svabdlb,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svabdlb_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_u16'}}
+  return SVE_ACLE_FUNC(svabdlb,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svabdlb_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_u32'}}
+  return SVE_ACLE_FUNC(svabdlb,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svabdlb_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_u64'}}
+  return SVE_ACLE_FUNC(svabdlb,_u64,,)(op1, op2);
+}
+
+svint16_t test_svabdlb_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_s16'}}
+  return SVE_ACLE_FUNC(svabdlb,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svabdlb_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_s32'}}
+  return SVE_ACLE_FUNC(svabdlb,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svabdlb_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_s64'}}
+  return SVE_ACLE_FUNC(svabdlb,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svabdlb_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_u16'}}
+  return SVE_ACLE_FUNC(svabdlb,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svabdlb_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_u32'}}
+  return SVE_ACLE_FUNC(svabdlb,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svabdlb_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svabdlb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlb_n_u64'}}
+  return SVE_ACLE_FUNC(svabdlb,_n_u64,,)(op1, op2);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlt.c
new file mode 100644
index 000000000000..dbd333ab764b
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_abdlt.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svabdlt_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_s16'}}
+  return SVE_ACLE_FUNC(svabdlt,_s16,,)(op1, op2);
+}
+
+svint32_t test_svabdlt_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_s32'}}
+  return SVE_ACLE_FUNC(svabdlt,_s32,,)(op1, op2);
+}
+
+svint64_t test_svabdlt_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_s64'}}
+  return SVE_ACLE_FUNC(svabdlt,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svabdlt_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_u16'}}
+  return SVE_ACLE_FUNC(svabdlt,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svabdlt_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_u32'}}
+  return SVE_ACLE_FUNC(svabdlt,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svabdlt_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_u64'}}
+  return SVE_ACLE_FUNC(svabdlt,_u64,,)(op1, op2);
+}
+
+svint16_t test_svabdlt_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_s16'}}
+  return SVE_ACLE_FUNC(svabdlt,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svabdlt_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_s32'}}
+  return SVE_ACLE_FUNC(svabdlt,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svabdlt_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_s64'}}
+  return SVE_ACLE_FUNC(svabdlt,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svabdlt_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uabdlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_u16'}}
+  return SVE_ACLE_FUNC(svabdlt,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svabdlt_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uabdlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_u32'}}
+  return SVE_ACLE_FUNC(svabdlt,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svabdlt_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svabdlt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uabdlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svabdlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svabdlt_n_u64'}}
+  return SVE_ACLE_FUNC(svabdlt,_n_u64,,)(op1, op2);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlb.c
new file mode 100644
index 000000000000..39096523b223
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlb.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svaddlb_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_s16'}}
+  return SVE_ACLE_FUNC(svaddlb,_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddlb_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_s32'}}
+  return SVE_ACLE_FUNC(svaddlb,_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddlb_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_s64'}}
+  return SVE_ACLE_FUNC(svaddlb,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddlb_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_u16'}}
+  return SVE_ACLE_FUNC(svaddlb,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddlb_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_u32'}}
+  return SVE_ACLE_FUNC(svaddlb,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddlb_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_u64'}}
+  return SVE_ACLE_FUNC(svaddlb,_u64,,)(op1, op2);
+}
+
+svint16_t test_svaddlb_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_s16'}}
+  return SVE_ACLE_FUNC(svaddlb,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddlb_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_s32'}}
+  return SVE_ACLE_FUNC(svaddlb,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddlb_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_s64'}}
+  return SVE_ACLE_FUNC(svaddlb,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddlb_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_u16'}}
+  return SVE_ACLE_FUNC(svaddlb,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddlb_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_u32'}}
+  return SVE_ACLE_FUNC(svaddlb,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddlb_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svaddlb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlb_n_u64'}}
+  return SVE_ACLE_FUNC(svaddlb,_n_u64,,)(op1, op2);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlt.c
new file mode 100644
index 000000000000..5d5806361e04
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addlt.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svaddlt_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_s16'}}
+  return SVE_ACLE_FUNC(svaddlt,_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddlt_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_s32'}}
+  return SVE_ACLE_FUNC(svaddlt,_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddlt_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_s64'}}
+  return SVE_ACLE_FUNC(svaddlt,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddlt_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_u16'}}
+  return SVE_ACLE_FUNC(svaddlt,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddlt_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_u32'}}
+  return SVE_ACLE_FUNC(svaddlt,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddlt_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_u64'}}
+  return SVE_ACLE_FUNC(svaddlt,_u64,,)(op1, op2);
+}
+
+svint16_t test_svaddlt_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_s16'}}
+  return SVE_ACLE_FUNC(svaddlt,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddlt_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_s32'}}
+  return SVE_ACLE_FUNC(svaddlt,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddlt_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_s64'}}
+  return SVE_ACLE_FUNC(svaddlt,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddlt_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddlt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_u16'}}
+  return SVE_ACLE_FUNC(svaddlt,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddlt_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddlt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_u32'}}
+  return SVE_ACLE_FUNC(svaddlt,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddlt_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svaddlt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddlt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddlt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddlt_n_u64'}}
+  return SVE_ACLE_FUNC(svaddlt,_n_u64,,)(op1, op2);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwb.c
new file mode 100644
index 000000000000..85ec2f09aeb4
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwb.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
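+
+// Editorial note: SVE_ACLE_FUNC(svaddwb,_s16,,) expands to the overloaded
+// 'svaddwb' when SVE_OVERLOADED_FORMS is defined and to the suffixed
+// 'svaddwb_s16' otherwise, matching the two RUN configurations above.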
+
+svint16_t test_svaddwb_s16(svint16_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_s16'}}
+  return SVE_ACLE_FUNC(svaddwb,_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddwb_s32(svint32_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_s32'}}
+  return SVE_ACLE_FUNC(svaddwb,_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddwb_s64(svint64_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_s64'}}
+  return SVE_ACLE_FUNC(svaddwb,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddwb_u16(svuint16_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_u16'}}
+  return SVE_ACLE_FUNC(svaddwb,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddwb_u32(svuint32_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_u32'}}
+  return SVE_ACLE_FUNC(svaddwb,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddwb_u64(svuint64_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_u64'}}
+  return SVE_ACLE_FUNC(svaddwb,_u64,,)(op1, op2);
+}
+
+svint16_t test_svaddwb_n_s16(svint16_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_s16'}}
+  return SVE_ACLE_FUNC(svaddwb,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddwb_n_s32(svint32_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_s32'}}
+  return SVE_ACLE_FUNC(svaddwb,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddwb_n_s64(svint64_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_s64'}}
+  return SVE_ACLE_FUNC(svaddwb,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddwb_n_u16(svuint16_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_u16'}}
+  return SVE_ACLE_FUNC(svaddwb,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddwb_n_u32(svuint32_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_u32'}}
+  return SVE_ACLE_FUNC(svaddwb,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddwb_n_u64(svuint64_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svaddwb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwb_n_u64'}}
+  return SVE_ACLE_FUNC(svaddwb,_n_u64,,)(op1, op2);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwt.c
new file mode 100644
index 000000000000..c4c40d295285
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_addwt.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
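+
+// Editorial note: as in the other tests, SVE_ACLE_FUNC(svaddwt,_n_u64,,)
+// resolves to 'svaddwt' under SVE_OVERLOADED_FORMS and to 'svaddwt_n_u64'
+// in the default build.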
+
+svint16_t test_svaddwt_s16(svint16_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_s16'}}
+  return SVE_ACLE_FUNC(svaddwt,_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddwt_s32(svint32_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_s32'}}
+  return SVE_ACLE_FUNC(svaddwt,_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddwt_s64(svint64_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_s64'}}
+  return SVE_ACLE_FUNC(svaddwt,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddwt_u16(svuint16_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_u16'}}
+  return SVE_ACLE_FUNC(svaddwt,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddwt_u32(svuint32_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_u32'}}
+  return SVE_ACLE_FUNC(svaddwt,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddwt_u64(svuint64_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_u64'}}
+  return SVE_ACLE_FUNC(svaddwt,_u64,,)(op1, op2);
+}
+
+svint16_t test_svaddwt_n_s16(svint16_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.saddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_s16'}}
+  return SVE_ACLE_FUNC(svaddwt,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svaddwt_n_s32(svint32_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.saddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_s32'}}
+  return SVE_ACLE_FUNC(svaddwt,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svaddwt_n_s64(svint64_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.saddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_s64'}}
+  return SVE_ACLE_FUNC(svaddwt,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svaddwt_n_u16(svuint16_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uaddwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_u16'}}
+  return SVE_ACLE_FUNC(svaddwt,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svaddwt_n_u32(svuint32_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uaddwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_u32'}}
+  return SVE_ACLE_FUNC(svaddwt,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svaddwt_n_u64(svuint64_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svaddwt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uaddwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svaddwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svaddwt_n_u64'}}
+  return SVE_ACLE_FUNC(svaddwt,_n_u64,,)(op1, op2);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalb.c
new file mode 100644
index 000000000000..497d7eba5849
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalb.c
@@ -0,0 +1,219 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
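+
+// Editorial note: the _lane tests below use the same pasting scheme, e.g.
+// SVE_ACLE_FUNC(svmlalb_lane,_s32,,) resolves to 'svmlalb_lane' under
+// SVE_OVERLOADED_FORMS and to 'svmlalb_lane_s32' otherwise.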
+
+svint16_t test_svmlalb_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_s16'}}
+  return SVE_ACLE_FUNC(svmlalb,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlalb_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_s32'}}
+  return SVE_ACLE_FUNC(svmlalb,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlalb_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_s64'}}
+  return SVE_ACLE_FUNC(svmlalb,_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlalb_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_u16'}}
+  return SVE_ACLE_FUNC(svmlalb,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlalb_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_u32'}}
+  return SVE_ACLE_FUNC(svmlalb,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlalb_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_u64'}}
+  return SVE_ACLE_FUNC(svmlalb,_u64,,)(op1, op2, op3);
+}
+
+svint16_t test_svmlalb_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_n_s16'}}
+  return SVE_ACLE_FUNC(svmlalb,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlalb_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_n_s32'}}
+  return SVE_ACLE_FUNC(svmlalb,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlalb_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_n_s64'}}
+  return SVE_ACLE_FUNC(svmlalb,_n_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlalb_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_n_u16'}}
+  return SVE_ACLE_FUNC(svmlalb,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlalb_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_n_u32'}}
+  return SVE_ACLE_FUNC(svmlalb,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlalb_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_n_u64'}}
+  return SVE_ACLE_FUNC(svmlalb,_n_u64,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlalb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svmlalb_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svmlalb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svmlalb_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_s64,,)(op1, op2, op3, 3);
+}
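+
+// Editorial note: the lane immediates 0 and 7 (16-bit multiplicands) and
+// 0 and 3 (32-bit multiplicands) exercise the ends of the valid ranges;
+// out-of-range indices are covered by negative/acle_sve2_mlalb.c.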
+
+svuint32_t test_svmlalb_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_u32,,)(op1, op2, op3, 0);
+}
+
+svuint32_t test_svmlalb_lane_u32_1(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_u32,,)(op1, op2, op3, 7);
+}
+
+svuint64_t test_svmlalb_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_u64,,)(op1, op2, op3, 0);
+}
+
+svuint64_t test_svmlalb_lane_u64_1(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalb_lane_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalb_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_u64,,)(op1, op2, op3, 3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalt.c
new file mode 100644
index 000000000000..aaceed871a36
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlalt.c
@@ -0,0 +1,219 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
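+
+// Editorial note: SVE_ACLE_FUNC(svmlalt,_n_s16,,) becomes 'svmlalt' with
+// SVE_OVERLOADED_FORMS defined and 'svmlalt_n_s16' without it.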
+
+svint16_t test_svmlalt_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_s16'}}
+  return SVE_ACLE_FUNC(svmlalt,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlalt_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_s32'}}
+  return SVE_ACLE_FUNC(svmlalt,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlalt_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_s64'}}
+  return SVE_ACLE_FUNC(svmlalt,_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlalt_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_u16'}}
+  return SVE_ACLE_FUNC(svmlalt,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlalt_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_u32'}}
+  return SVE_ACLE_FUNC(svmlalt,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlalt_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_u64'}}
+  return SVE_ACLE_FUNC(svmlalt,_u64,,)(op1, op2, op3);
+}
+
+svint16_t test_svmlalt_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_n_s16'}}
+  return SVE_ACLE_FUNC(svmlalt,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlalt_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_n_s32'}}
+  return SVE_ACLE_FUNC(svmlalt,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlalt_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_n_s64'}}
+  return SVE_ACLE_FUNC(svmlalt,_n_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlalt_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_n_u16'}}
+  return SVE_ACLE_FUNC(svmlalt,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlalt_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_n_u32'}}
+  return SVE_ACLE_FUNC(svmlalt,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlalt_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_n_u64'}}
+  return SVE_ACLE_FUNC(svmlalt,_n_u64,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlalt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svmlalt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svmlalt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svmlalt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_s64,,)(op1, op2, op3, 3);
+}
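+
+// Editorial note: as for svmlalb_lane, the immediate must be 0..7 for 16-bit
+// multiplicands and 0..3 for 32-bit ones; negative/acle_sve2_mlalt.c checks
+// that out-of-range values are rejected.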
+
+svuint32_t test_svmlalt_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_u32,,)(op1, op2, op3, 0);
+}
+
+svuint32_t test_svmlalt_lane_u32_1(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_u32,,)(op1, op2, op3, 7);
+}
+
+svuint64_t test_svmlalt_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_u64,,)(op1, op2, op3, 0);
+}
+
+svuint64_t test_svmlalt_lane_u64_1(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlalt_lane_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlalt_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_u64,,)(op1, op2, op3, 3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslb.c
new file mode 100644
index 000000000000..f4d49194cc2f
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslb.c
@@ -0,0 +1,219 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
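+
+// Editorial note: SVE_ACLE_FUNC(svmlslb,_u32,,) resolves to 'svmlslb' under
+// SVE_OVERLOADED_FORMS and to 'svmlslb_u32' otherwise.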
+
+svint16_t test_svmlslb_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_s16'}}
+  return SVE_ACLE_FUNC(svmlslb,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlslb_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_s32'}}
+  return SVE_ACLE_FUNC(svmlslb,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlslb_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_s64'}}
+  return SVE_ACLE_FUNC(svmlslb,_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlslb_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_u16'}}
+  return SVE_ACLE_FUNC(svmlslb,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlslb_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_u32'}}
+  return SVE_ACLE_FUNC(svmlslb,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlslb_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_u64'}}
+  return SVE_ACLE_FUNC(svmlslb,_u64,,)(op1, op2, op3);
+}
+
+svint16_t test_svmlslb_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_n_s16'}}
+  return SVE_ACLE_FUNC(svmlslb,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlslb_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_n_s32'}}
+  return SVE_ACLE_FUNC(svmlslb,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlslb_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_n_s64'}}
+  return SVE_ACLE_FUNC(svmlslb,_n_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlslb_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_n_u16'}}
+  return SVE_ACLE_FUNC(svmlslb,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlslb_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_n_u32'}}
+  return SVE_ACLE_FUNC(svmlslb,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlslb_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_n_u64'}}
+  return SVE_ACLE_FUNC(svmlslb,_n_u64,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlslb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svmlslb_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svmlslb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svmlslb_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_s64,,)(op1, op2, op3, 3);
+}
+
+svuint32_t test_svmlslb_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_u32,,)(op1, op2, op3, 0);
+}
+
+svuint32_t test_svmlslb_lane_u32_1(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_u32,,)(op1, op2, op3, 7);
+}
+
+svuint64_t test_svmlslb_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_u64,,)(op1, op2, op3, 0);
+}
+
+svuint64_t test_svmlslb_lane_u64_1(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslb_lane_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslb_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_u64,,)(op1, op2, op3, 3);
+}
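
A note on the 'b'/'t' suffixes exercised by these tests: the "bottom" forms such as
svmlslb read the even-numbered elements of the two narrow operands, widen their
product and subtract it from the wide accumulator, while the "top" forms such as
svmlslt use the odd-numbered elements. A rough scalar model of svmlslb_s32, not
part of the patch and with a fixed element count purely for illustration:

  #include <stdint.h>

  // Per 128-bit granule: four 32-bit accumulator lanes, eight 16-bit inputs.
  void mlslb_s32_model(int32_t acc[4], const int16_t a[8], const int16_t b[8]) {
    for (int i = 0; i < 4; ++i)
      acc[i] -= (int32_t)a[2 * i] * (int32_t)b[2 * i];  // even = "bottom" elements
  }

svmlslt_s32 would read a[2 * i + 1] and b[2 * i + 1] instead.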

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslt.c
new file mode 100644
index 000000000000..1a3583e38714
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mlslt.c
@@ -0,0 +1,219 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svmlslt_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_s16'}}
+  return SVE_ACLE_FUNC(svmlslt,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlslt_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_s32'}}
+  return SVE_ACLE_FUNC(svmlslt,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlslt_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_s64'}}
+  return SVE_ACLE_FUNC(svmlslt,_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlslt_u16(svuint16_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_u16'}}
+  return SVE_ACLE_FUNC(svmlslt,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlslt_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_u32'}}
+  return SVE_ACLE_FUNC(svmlslt,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlslt_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_u64'}}
+  return SVE_ACLE_FUNC(svmlslt,_u64,,)(op1, op2, op3);
+}
+
+svint16_t test_svmlslt_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s16'}}
+  return SVE_ACLE_FUNC(svmlslt,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlslt_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s32'}}
+  return SVE_ACLE_FUNC(svmlslt,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svmlslt_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_s64'}}
+  return SVE_ACLE_FUNC(svmlslt,_n_s64,,)(op1, op2, op3);
+}
+
+svuint16_t test_svmlslt_n_u16(svuint16_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u16'}}
+  return SVE_ACLE_FUNC(svmlslt,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svmlslt_n_u32(svuint32_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u32'}}
+  return SVE_ACLE_FUNC(svmlslt,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svmlslt_n_u64(svuint64_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_n_u64'}}
+  return SVE_ACLE_FUNC(svmlslt,_n_u64,,)(op1, op2, op3);
+}
+
+svint32_t test_svmlslt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svmlslt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s32'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svmlslt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svmlslt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_s64'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_s64,,)(op1, op2, op3, 3);
+}
+
+svuint32_t test_svmlslt_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_u32,,)(op1, op2, op3, 0);
+}
+
+svuint32_t test_svmlslt_lane_u32_1(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u32'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_u32,,)(op1, op2, op3, 7);
+}
+
+svuint64_t test_svmlslt_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_u64,,)(op1, op2, op3, 0);
+}
+
+svuint64_t test_svmlslt_lane_u64_1(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svmlslt_lane_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmlslt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmlslt_lane_u64'}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_u64,,)(op1, op2, op3, 3);
+}
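
A quick aside on the SVE_ACLE_FUNC macro used throughout these files: with
-DSVE_OVERLOADED_FORMS the second and fourth arguments are discarded, so the call
resolves to the overloaded intrinsic name; without it all four tokens are pasted
into the fully mangled name. For example (illustration only, not part of the patch):

  // With SVE_OVERLOADED_FORMS defined:
  //   SVE_ACLE_FUNC(svmlslt,_s16,,)      expands to svmlslt
  //   SVE_ACLE_FUNC(svmlslt_lane,_s32,,) expands to svmlslt_lane
  // Without it:
  //   SVE_ACLE_FUNC(svmlslt,_s16,,)      expands to svmlslt_s16
  //   SVE_ACLE_FUNC(svmlslt_lane,_s32,,) expands to svmlslt_lane_s32

That is why the -verify run lines expect the plain names ('svmlslt') in the
overloaded configuration and the mangled names ('svmlslt_s16') otherwise.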

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullb.c
new file mode 100644
index 000000000000..fca203bb60b9
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullb.c
@@ -0,0 +1,219 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svmullb_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svmullb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_s16'}}
+  return SVE_ACLE_FUNC(svmullb,_s16,,)(op1, op2);
+}
+
+svint32_t test_svmullb_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_s32'}}
+  return SVE_ACLE_FUNC(svmullb,_s32,,)(op1, op2);
+}
+
+svint64_t test_svmullb_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_s64'}}
+  return SVE_ACLE_FUNC(svmullb,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svmullb_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svmullb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_u16'}}
+  return SVE_ACLE_FUNC(svmullb,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svmullb_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_u32'}}
+  return SVE_ACLE_FUNC(svmullb,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svmullb_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_u64'}}
+  return SVE_ACLE_FUNC(svmullb,_u64,,)(op1, op2);
+}
+
+svint16_t test_svmullb_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svmullb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_s16'}}
+  return SVE_ACLE_FUNC(svmullb,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svmullb_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_s32'}}
+  return SVE_ACLE_FUNC(svmullb,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svmullb_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_s64'}}
+  return SVE_ACLE_FUNC(svmullb,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svmullb_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svmullb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_u16'}}
+  return SVE_ACLE_FUNC(svmullb,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svmullb_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_u32'}}
+  return SVE_ACLE_FUNC(svmullb,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svmullb_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_n_u64'}}
+  return SVE_ACLE_FUNC(svmullb,_n_u64,,)(op1, op2);
+}
+
+svint32_t test_svmullb_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s32'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_s32,,)(op1, op2, 0);
+}
+
+svint32_t test_svmullb_lane_s32_1(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s32'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_s32,,)(op1, op2, 7);
+}
+
+svint64_t test_svmullb_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s64'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_s64,,)(op1, op2, 0);
+}
+
+svint64_t test_svmullb_lane_s64_1(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_s64'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_s64,,)(op1, op2, 3);
+}
+
+svuint32_t test_svmullb_lane_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u32'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_u32,,)(op1, op2, 0);
+}
+
+svuint32_t test_svmullb_lane_u32_1(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u32'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_u32,,)(op1, op2, 7);
+}
+
+svuint64_t test_svmullb_lane_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u64'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_u64,,)(op1, op2, 0);
+}
+
+svuint64_t test_svmullb_lane_u64_1(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svmullb_lane_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullb_lane_u64'}}
+  return SVE_ACLE_FUNC(svmullb_lane,_u64,,)(op1, op2, 3);
+}
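
The _lane tests above intentionally use the largest accepted index (7 when the
narrow elements are 16-bit, 3 when they are 32-bit): the immediate selects an
element of the last vector operand within each 128-bit segment, so the legal range
is 0-7 for .h inputs and 0-3 for .s inputs. A rough scalar model of one 128-bit
segment of svmullb_lane_s32, not part of the patch and with hypothetical helper
names:

  #include <stdint.h>

  // One 128-bit segment: eight 16-bit inputs produce four 32-bit results.
  // 'lane' must be a compile-time constant in the range [0, 7].
  void mullb_lane_s32_model(int32_t out[4], const int16_t a[8],
                            const int16_t b[8], int lane) {
    int16_t c = b[lane];                       // the same lane is reused across the segment
    for (int i = 0; i < 4; ++i)
      out[i] = (int32_t)a[2 * i] * (int32_t)c; // bottom (even) elements of 'a'
  }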

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullt.c
new file mode 100644
index 000000000000..9590d4aa895c
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_mullt.c
@@ -0,0 +1,219 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svmullt_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svmullt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_s16'}}
+  return SVE_ACLE_FUNC(svmullt,_s16,,)(op1, op2);
+}
+
+svint32_t test_svmullt_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_s32'}}
+  return SVE_ACLE_FUNC(svmullt,_s32,,)(op1, op2);
+}
+
+svint64_t test_svmullt_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_s64'}}
+  return SVE_ACLE_FUNC(svmullt,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svmullt_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svmullt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_u16'}}
+  return SVE_ACLE_FUNC(svmullt,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svmullt_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_u32'}}
+  return SVE_ACLE_FUNC(svmullt,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svmullt_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_u64'}}
+  return SVE_ACLE_FUNC(svmullt,_u64,,)(op1, op2);
+}
+
+svint16_t test_svmullt_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svmullt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.smullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_s16'}}
+  return SVE_ACLE_FUNC(svmullt,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svmullt_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_s32'}}
+  return SVE_ACLE_FUNC(svmullt,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svmullt_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_s64'}}
+  return SVE_ACLE_FUNC(svmullt,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svmullt_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svmullt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.umullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_u16'}}
+  return SVE_ACLE_FUNC(svmullt,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svmullt_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_u32'}}
+  return SVE_ACLE_FUNC(svmullt,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svmullt_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_n_u64'}}
+  return SVE_ACLE_FUNC(svmullt,_n_u64,,)(op1, op2);
+}
+
+svint32_t test_svmullt_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s32'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_s32,,)(op1, op2, 0);
+}
+
+svint32_t test_svmullt_lane_s32_1(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s32'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_s32,,)(op1, op2, 7);
+}
+
+svint64_t test_svmullt_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s64'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_s64,,)(op1, op2, 0);
+}
+
+svint64_t test_svmullt_lane_s64_1(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.smullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_s64'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_s64,,)(op1, op2, 3);
+}
+
+svuint32_t test_svmullt_lane_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u32'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_u32,,)(op1, op2, 0);
+}
+
+svuint32_t test_svmullt_lane_u32_1(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.umullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u32'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_u32,,)(op1, op2, 7);
+}
+
+svuint64_t test_svmullt_lane_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u64'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_u64,,)(op1, op2, 0);
+}
+
+svuint64_t test_svmullt_lane_u64_1(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svmullt_lane_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.umullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svmullt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svmullt_lane_u64'}}
+  return SVE_ACLE_FUNC(svmullt_lane,_u64,,)(op1, op2, 3);
+}
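
As the CHECK lines for the _n overloads show, the scalar operand is splatted with
@llvm.aarch64.sve.dup.x and then handed to the same widening intrinsic as the
vector form. In source terms the _n form therefore behaves like the vector form
applied to a duplicated scalar, roughly as in this sketch (not part of the patch,
compiled with the same SVE2 options as these tests):

  #include <arm_sve.h>

  // Hand-written equivalent of svmullt_n_u64: splat the scalar, reuse the vector form.
  svuint64_t mullt_n_u64_equiv(svuint32_t op1, uint32_t op2) {
    return svmullt_u64(op1, svdup_n_u32(op2));
  }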

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalb.c
new file mode 100644
index 000000000000..f1658279f15f
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalb.c
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svqdmlalb_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_s16'}}
+  return SVE_ACLE_FUNC(svqdmlalb,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlalb_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalb,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlalb_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalb,_s64,,)(op1, op2, op3);
+}
+
+svint16_t test_svqdmlalb_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_n_s16'}}
+  return SVE_ACLE_FUNC(svqdmlalb,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlalb_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_n_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalb,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlalb_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_n_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalb,_n_s64,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlalb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalb_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svqdmlalb_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalb_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svqdmlalb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalb_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svqdmlalb_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalb_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalb_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalb_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalb_lane,_s64,,)(op1, op2, op3, 3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalt.c
new file mode 100644
index 000000000000..f1554f37c876
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlalt.c
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svqdmlalt_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_s16'}}
+  return SVE_ACLE_FUNC(svqdmlalt,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlalt_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalt,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlalt_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalt,_s64,,)(op1, op2, op3);
+}
+
+svint16_t test_svqdmlalt_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlalt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_n_s16'}}
+  return SVE_ACLE_FUNC(svqdmlalt,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlalt_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_n_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalt,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlalt_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_n_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalt,_n_s64,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlalt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalt_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svqdmlalt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlalt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlalt_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svqdmlalt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalt_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svqdmlalt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlalt_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlalt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svqdmlalt_lane'}}
+  // expected-warning@+1 {{implicit declaration of function 'svqdmlalt_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlalt_lane,_s64,,)(op1, op2, op3, 3);
+}
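
For reference on the saturating forms tested above: svqdmlalb widens the even
(bottom) elements of the narrow operands, doubles their product with saturation
and then accumulates into op1, again saturating; svqdmlalt does the same with the
odd (top) elements, and the svqdmlslb/svqdmlslt variants in the next file subtract
instead of add. A rough scalar model of svqdmlalb_s32, not part of the patch and
with a fixed element count purely for illustration:

  #include <stdint.h>

  static int32_t sat_s32(int64_t v) {        // clamp to the int32_t range
    if (v > INT32_MAX) return INT32_MAX;
    if (v < INT32_MIN) return INT32_MIN;
    return (int32_t)v;
  }

  void qdmlalb_s32_model(int32_t acc[4], const int16_t a[8], const int16_t b[8]) {
    for (int i = 0; i < 4; ++i) {
      // Doubled widening product of the even (bottom) elements, computed in 64 bits.
      int64_t prod2 = 2 * (int64_t)a[2 * i] * (int64_t)b[2 * i];
      // Saturate the doubled product, then do a saturating accumulate.
      acc[i] = sat_s32((int64_t)acc[i] + sat_s32(prod2));
    }
  }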

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslb.c
new file mode 100644
index 000000000000..29e1fcbb2c79
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslb.c
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svqdmlslb_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_s16'}}
+  return SVE_ACLE_FUNC(svqdmlslb,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlslb_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslb,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlslb_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslb,_s64,,)(op1, op2, op3);
+}
+
+svint16_t test_svqdmlslb_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_n_s16'}}
+  return SVE_ACLE_FUNC(svqdmlslb,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlslb_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_n_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslb,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlslb_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_n_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslb,_n_s64,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlslb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslb_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svqdmlslb_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslb.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslb_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svqdmlslb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslb_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svqdmlslb_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslb_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslb.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslb_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslb_lane,_s64,,)(op1, op2, op3, 3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslt.c
new file mode 100644
index 000000000000..f3d78c06b3a4
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmlslt.c
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svqdmlslt_s16(svint16_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_s16'}}
+  return SVE_ACLE_FUNC(svqdmlslt,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlslt_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslt,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlslt_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslt,_s64,,)(op1, op2, op3);
+}
+
+svint16_t test_svqdmlslt_n_s16(svint16_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmlslt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_n_s16'}}
+  return SVE_ACLE_FUNC(svqdmlslt,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlslt_n_s32(svint32_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_n_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslt,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svqdmlslt_n_s64(svint64_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_n_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslt,_n_s64,,)(op1, op2, op3);
+}
+
+svint32_t test_svqdmlslt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslt_lane,_s32,,)(op1, op2, op3, 0);
+}
+
+svint32_t test_svqdmlslt_lane_s32_1(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmlslt.lane.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmlslt_lane,_s32,,)(op1, op2, op3, 7);
+}
+
+svint64_t test_svqdmlslt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslt_lane,_s64,,)(op1, op2, op3, 0);
+}
+
+svint64_t test_svqdmlslt_lane_s64_1(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svqdmlslt_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmlslt.lane.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmlslt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmlslt_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmlslt_lane,_s64,,)(op1, op2, op3, 3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullb.c
new file mode 100644
index 000000000000..e7101e43ad20
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullb.c
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svqdmullb_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_s16'}}
+  return SVE_ACLE_FUNC(svqdmullb,_s16,,)(op1, op2);
+}
+
+svint32_t test_svqdmullb_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_s32'}}
+  return SVE_ACLE_FUNC(svqdmullb,_s32,,)(op1, op2);
+}
+
+svint64_t test_svqdmullb_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_s64'}}
+  return SVE_ACLE_FUNC(svqdmullb,_s64,,)(op1, op2);
+}
+
+svint16_t test_svqdmullb_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_n_s16'}}
+  return SVE_ACLE_FUNC(svqdmullb,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svqdmullb_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_n_s32'}}
+  return SVE_ACLE_FUNC(svqdmullb,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svqdmullb_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_n_s64'}}
+  return SVE_ACLE_FUNC(svqdmullb,_n_s64,,)(op1, op2);
+}
+
+svint32_t test_svqdmullb_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmullb_lane,_s32,,)(op1, op2, 0);
+}
+
+svint32_t test_svqdmullb_lane_s32_1(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullb.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmullb_lane,_s32,,)(op1, op2, 7);
+}
+
+svint64_t test_svqdmullb_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmullb_lane,_s64,,)(op1, op2, 0);
+}
+
+svint64_t test_svqdmullb_lane_s64_1(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullb_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullb.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullb_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullb_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmullb_lane,_s64,,)(op1, op2, 3);
+}
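
The _lane forms above take the index as an immediate, and the tests deliberately use both ends of the accepted range: 0 and 7 when the multiplicands have 16-bit elements, 0 and 3 when they have 32-bit elements (the lane index picks an element within each 128-bit segment of the second operand). A usage sketch (illustration only, assumes an SVE2-enabled toolchain):

  #include <arm_sve.h>

  svint32_t example_qdmullb_lane(svint16_t a, svint16_t b) {
    // Saturating doubling multiply of the even (bottom) 16-bit elements
    // of 'a' by lane 7 of 'b', widening into 32-bit results.
    return svqdmullb_lane_s32(a, b, 7);
  }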

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullt.c
new file mode 100644
index 000000000000..4943b503c6a8
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_qdmullt.c
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svqdmullt_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_s16'}}
+  return SVE_ACLE_FUNC(svqdmullt,_s16,,)(op1, op2);
+}
+
+svint32_t test_svqdmullt_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_s32'}}
+  return SVE_ACLE_FUNC(svqdmullt,_s32,,)(op1, op2);
+}
+
+svint64_t test_svqdmullt_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_s64'}}
+  return SVE_ACLE_FUNC(svqdmullt,_s64,,)(op1, op2);
+}
+
+svint16_t test_svqdmullt_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqdmullt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_n_s16'}}
+  return SVE_ACLE_FUNC(svqdmullt,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svqdmullt_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_n_s32'}}
+  return SVE_ACLE_FUNC(svqdmullt,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svqdmullt_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_n_s64'}}
+  return SVE_ACLE_FUNC(svqdmullt,_n_s64,,)(op1, op2);
+}
+
+svint32_t test_svqdmullt_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_lane_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmullt_lane,_s32,,)(op1, op2, 0);
+}
+
+svint32_t test_svqdmullt_lane_s32_1(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_lane_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqdmullt.lane.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 7)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_lane_s32'}}
+  return SVE_ACLE_FUNC(svqdmullt_lane,_s32,,)(op1, op2, 7);
+}
+
+svint64_t test_svqdmullt_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_lane_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmullt_lane,_s64,,)(op1, op2, 0);
+}
+
+svint64_t test_svqdmullt_lane_s64_1(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svqdmullt_lane_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqdmullt.lane.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svqdmullt_lane'}}
+  // expected-warning at +1 {{implicit declaration of function 'svqdmullt_lane_s64'}}
+  return SVE_ACLE_FUNC(svqdmullt_lane,_s64,,)(op1, op2, 3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllb.c
new file mode 100644
index 000000000000..e0b10d095928
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllb.c
@@ -0,0 +1,133 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svshllb_n_s16(svint8_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllb.nxv8i16(<vscale x 16 x i8> %op1, i32 0)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_s16'}}
+  return SVE_ACLE_FUNC(svshllb,_n_s16,,)(op1, 0);
+}
+
+svint16_t test_svshllb_n_s16_1(svint8_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_s16_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllb.nxv8i16(<vscale x 16 x i8> %op1, i32 7)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_s16'}}
+  return SVE_ACLE_FUNC(svshllb,_n_s16,,)(op1, 7);
+}
+
+svint32_t test_svshllb_n_s32(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllb.nxv4i32(<vscale x 8 x i16> %op1, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_s32'}}
+  return SVE_ACLE_FUNC(svshllb,_n_s32,,)(op1, 0);
+}
+
+svint32_t test_svshllb_n_s32_1(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllb.nxv4i32(<vscale x 8 x i16> %op1, i32 15)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_s32'}}
+  return SVE_ACLE_FUNC(svshllb,_n_s32,,)(op1, 15);
+}
+
+svint64_t test_svshllb_n_s64(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllb.nxv2i64(<vscale x 4 x i32> %op1, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_s64'}}
+  return SVE_ACLE_FUNC(svshllb,_n_s64,,)(op1, 0);
+}
+
+svint64_t test_svshllb_n_s64_1(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllb.nxv2i64(<vscale x 4 x i32> %op1, i32 31)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_s64'}}
+  return SVE_ACLE_FUNC(svshllb,_n_s64,,)(op1, 31);
+}
+
+svuint16_t test_svshllb_n_u16(svuint8_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllb.nxv8i16(<vscale x 16 x i8> %op1, i32 0)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_u16'}}
+  return SVE_ACLE_FUNC(svshllb,_n_u16,,)(op1, 0);
+}
+
+svuint16_t test_svshllb_n_u16_1(svuint8_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_u16_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllb.nxv8i16(<vscale x 16 x i8> %op1, i32 7)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_u16'}}
+  return SVE_ACLE_FUNC(svshllb,_n_u16,,)(op1, 7);
+}
+
+svuint32_t test_svshllb_n_u32(svuint16_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllb.nxv4i32(<vscale x 8 x i16> %op1, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_u32'}}
+  return SVE_ACLE_FUNC(svshllb,_n_u32,,)(op1, 0);
+}
+
+svuint32_t test_svshllb_n_u32_1(svuint16_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllb.nxv4i32(<vscale x 8 x i16> %op1, i32 15)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_u32'}}
+  return SVE_ACLE_FUNC(svshllb,_n_u32,,)(op1, 15);
+}
+
+svuint64_t test_svshllb_n_u64(svuint32_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllb.nxv2i64(<vscale x 4 x i32> %op1, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_u64'}}
+  return SVE_ACLE_FUNC(svshllb,_n_u64,,)(op1, 0);
+}
+
+svuint64_t test_svshllb_n_u64_1(svuint32_t op1)
+{
+  // CHECK-LABEL: test_svshllb_n_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllb.nxv2i64(<vscale x 4 x i32> %op1, i32 31)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllb_n_u64'}}
+  return SVE_ACLE_FUNC(svshllb,_n_u64,,)(op1, 31);
+}
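
svshllb (and svshllt below) take the shift amount as an immediate, and the tests cover the extremes of each range, which scales with the source element size: 0..7 for 8-bit sources, 0..15 for 16-bit, 0..31 for 32-bit. A small sketch (illustration only, assumes an SVE2-enabled toolchain):

  #include <arm_sve.h>

  svuint32_t example_shllb(svuint16_t op) {
    // Zero-extend the even (bottom) 16-bit elements to 32 bits and
    // shift the widened results left by 15.
    return svshllb_n_u32(op, 15);
  }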

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllt.c
new file mode 100644
index 000000000000..6faf3c819c7f
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_shllt.c
@@ -0,0 +1,133 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svshllt_n_s16(svint8_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllt.nxv8i16(<vscale x 16 x i8> %op1, i32 0)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_s16'}}
+  return SVE_ACLE_FUNC(svshllt,_n_s16,,)(op1, 0);
+}
+
+svint16_t test_svshllt_n_s16_1(svint8_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_s16_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sshllt.nxv8i16(<vscale x 16 x i8> %op1, i32 7)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_s16'}}
+  return SVE_ACLE_FUNC(svshllt,_n_s16,,)(op1, 7);
+}
+
+svint32_t test_svshllt_n_s32(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllt.nxv4i32(<vscale x 8 x i16> %op1, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_s32'}}
+  return SVE_ACLE_FUNC(svshllt,_n_s32,,)(op1, 0);
+}
+
+svint32_t test_svshllt_n_s32_1(svint16_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sshllt.nxv4i32(<vscale x 8 x i16> %op1, i32 15)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_s32'}}
+  return SVE_ACLE_FUNC(svshllt,_n_s32,,)(op1, 15);
+}
+
+svint64_t test_svshllt_n_s64(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllt.nxv2i64(<vscale x 4 x i32> %op1, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_s64'}}
+  return SVE_ACLE_FUNC(svshllt,_n_s64,,)(op1, 0);
+}
+
+svint64_t test_svshllt_n_s64_1(svint32_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sshllt.nxv2i64(<vscale x 4 x i32> %op1, i32 31)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_s64'}}
+  return SVE_ACLE_FUNC(svshllt,_n_s64,,)(op1, 31);
+}
+
+svuint16_t test_svshllt_n_u16(svuint8_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllt.nxv8i16(<vscale x 16 x i8> %op1, i32 0)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_u16'}}
+  return SVE_ACLE_FUNC(svshllt,_n_u16,,)(op1, 0);
+}
+
+svuint16_t test_svshllt_n_u16_1(svuint8_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_u16_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ushllt.nxv8i16(<vscale x 16 x i8> %op1, i32 7)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_u16'}}
+  return SVE_ACLE_FUNC(svshllt,_n_u16,,)(op1, 7);
+}
+
+svuint32_t test_svshllt_n_u32(svuint16_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllt.nxv4i32(<vscale x 8 x i16> %op1, i32 0)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_u32'}}
+  return SVE_ACLE_FUNC(svshllt,_n_u32,,)(op1, 0);
+}
+
+svuint32_t test_svshllt_n_u32_1(svuint16_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ushllt.nxv4i32(<vscale x 8 x i16> %op1, i32 15)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_u32'}}
+  return SVE_ACLE_FUNC(svshllt,_n_u32,,)(op1, 15);
+}
+
+svuint64_t test_svshllt_n_u64(svuint32_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllt.nxv2i64(<vscale x 4 x i32> %op1, i32 0)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_u64'}}
+  return SVE_ACLE_FUNC(svshllt,_n_u64,,)(op1, 0);
+}
+
+svuint64_t test_svshllt_n_u64_1(svuint32_t op1)
+{
+  // CHECK-LABEL: test_svshllt_n_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ushllt.nxv2i64(<vscale x 4 x i32> %op1, i32 31)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svshllt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svshllt_n_u64'}}
+  return SVE_ACLE_FUNC(svshllt,_n_u64,,)(op1, 31);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublb.c
new file mode 100644
index 000000000000..e9b7b286fd91
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublb.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svsublb_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svsublb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_s16'}}
+  return SVE_ACLE_FUNC(svsublb,_s16,,)(op1, op2);
+}
+
+svint32_t test_svsublb_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svsublb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_s32'}}
+  return SVE_ACLE_FUNC(svsublb,_s32,,)(op1, op2);
+}
+
+svint64_t test_svsublb_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svsublb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_s64'}}
+  return SVE_ACLE_FUNC(svsublb,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsublb_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svsublb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_u16'}}
+  return SVE_ACLE_FUNC(svsublb,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsublb_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svsublb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_u32'}}
+  return SVE_ACLE_FUNC(svsublb,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsublb_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svsublb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_u64'}}
+  return SVE_ACLE_FUNC(svsublb,_u64,,)(op1, op2);
+}
+
+svint16_t test_svsublb_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svsublb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_n_s16'}}
+  return SVE_ACLE_FUNC(svsublb,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svsublb_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svsublb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_n_s32'}}
+  return SVE_ACLE_FUNC(svsublb,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svsublb_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svsublb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_n_s64'}}
+  return SVE_ACLE_FUNC(svsublb,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsublb_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svsublb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usublb.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_n_u16'}}
+  return SVE_ACLE_FUNC(svsublb,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsublb_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svsublb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usublb.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_n_u32'}}
+  return SVE_ACLE_FUNC(svsublb,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsublb_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svsublb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usublb.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublb'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublb_n_u64'}}
+  return SVE_ACLE_FUNC(svsublb,_n_u64,,)(op1, op2);
+}
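
For the _n_ forms the scalar operand is broadcast first; the CHECK lines above show the llvm.aarch64.sve.dup.x call feeding the widening subtract. A rough equivalence (illustration only, assumes an SVE2-enabled toolchain):

  #include <arm_sve.h>

  svint32_t example_sublb_n(svint16_t a, int16_t b) {
    // Same effect as svsublb_s32(a, svdup_n_s16(b)): subtract the
    // broadcast scalar from the even (bottom) 16-bit elements of 'a',
    // widening the result to 32 bits.
    return svsublb_n_s32(a, b);
  }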

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublt.c
new file mode 100644
index 000000000000..02cc433ac218
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_sublt.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svsublt_s16(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svsublt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssublt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_s16'}}
+  return SVE_ACLE_FUNC(svsublt,_s16,,)(op1, op2);
+}
+
+svint32_t test_svsublt_s32(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svsublt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssublt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_s32'}}
+  return SVE_ACLE_FUNC(svsublt,_s32,,)(op1, op2);
+}
+
+svint64_t test_svsublt_s64(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svsublt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssublt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_s64'}}
+  return SVE_ACLE_FUNC(svsublt,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsublt_u16(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svsublt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usublt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_u16'}}
+  return SVE_ACLE_FUNC(svsublt,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsublt_u32(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svsublt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usublt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_u32'}}
+  return SVE_ACLE_FUNC(svsublt,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsublt_u64(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svsublt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usublt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_u64'}}
+  return SVE_ACLE_FUNC(svsublt,_u64,,)(op1, op2);
+}
+
+svint16_t test_svsublt_n_s16(svint8_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svsublt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssublt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_n_s16'}}
+  return SVE_ACLE_FUNC(svsublt,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svsublt_n_s32(svint16_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svsublt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssublt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_n_s32'}}
+  return SVE_ACLE_FUNC(svsublt,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svsublt_n_s64(svint32_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svsublt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssublt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_n_s64'}}
+  return SVE_ACLE_FUNC(svsublt,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsublt_n_u16(svuint8_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svsublt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usublt.nxv8i16(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning at +1 {{implicit declaration of function 'svsublt_n_u16'}}
+  return SVE_ACLE_FUNC(svsublt,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsublt_n_u32(svuint16_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svsublt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usublt.nxv4i32(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsublt_n_u32'}}
+  return SVE_ACLE_FUNC(svsublt,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsublt_n_u64(svuint32_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svsublt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usublt.nxv2i64(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsublt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsublt_n_u64'}}
+  return SVE_ACLE_FUNC(svsublt,_n_u64,,)(op1, op2);
+}
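
The _n_ forms above exercise the scalar-operand variants: as the CHECK lines show, the scalar is first broadcast with a dup.x intrinsic and the widening subtract then operates on the duplicated vector. A minimal sketch of that equivalence, assuming the generic ACLE duplicate intrinsic svdup_n_u32 (not part of this patch, shown for illustration only):

  // Illustrative sketch; requires <arm_sve.h> and SVE2.
  svuint64_t sublt_n_u64_sketch(svuint32_t op1, uint32_t op2) {
    // Broadcast the scalar, then subtract the top (odd-numbered) elements
    // and widen the result, matching the _n_ tests above.
    return svsublt_u64(op1, svdup_n_u32(op2));
  }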

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwb.c
new file mode 100644
index 000000000000..e73e3481d6c9
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwb.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svsubwb_s16(svint16_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssubwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_s16'}}
+  return SVE_ACLE_FUNC(svsubwb,_s16,,)(op1, op2);
+}
+
+svint32_t test_svsubwb_s32(svint32_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssubwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_s32'}}
+  return SVE_ACLE_FUNC(svsubwb,_s32,,)(op1, op2);
+}
+
+svint64_t test_svsubwb_s64(svint64_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssubwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_s64'}}
+  return SVE_ACLE_FUNC(svsubwb,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsubwb_u16(svuint16_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usubwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_u16'}}
+  return SVE_ACLE_FUNC(svsubwb,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsubwb_u32(svuint32_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usubwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_u32'}}
+  return SVE_ACLE_FUNC(svsubwb,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsubwb_u64(svuint64_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usubwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_u64'}}
+  return SVE_ACLE_FUNC(svsubwb,_u64,,)(op1, op2);
+}
+
+svint16_t test_svsubwb_n_s16(svint16_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssubwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_n_s16'}}
+  return SVE_ACLE_FUNC(svsubwb,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svsubwb_n_s32(svint32_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssubwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_n_s32'}}
+  return SVE_ACLE_FUNC(svsubwb,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svsubwb_n_s64(svint64_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssubwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_n_s64'}}
+  return SVE_ACLE_FUNC(svsubwb,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsubwb_n_u16(svuint16_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usubwb.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_n_u16'}}
+  return SVE_ACLE_FUNC(svsubwb,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsubwb_n_u32(svuint32_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usubwb.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_n_u32'}}
+  return SVE_ACLE_FUNC(svsubwb,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsubwb_n_u64(svuint64_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svsubwb_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usubwb.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwb'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwb_n_u64'}}
+  return SVE_ACLE_FUNC(svsubwb,_n_u64,,)(op1, op2);
+}
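
The SVE_ACLE_FUNC macro used throughout these tests simply pastes the pieces of the intrinsic name together, so one source line covers both the type-mangled and the overloaded spelling. For example (expansion shown for illustration only):

  // Without SVE_OVERLOADED_FORMS (A1##A2##A3##A4):
  //   SVE_ACLE_FUNC(svsubwb,_s16,,)(op1, op2)  ->  svsubwb_s16(op1, op2)
  // With SVE_OVERLOADED_FORMS (A1##A3):
  //   SVE_ACLE_FUNC(svsubwb,_s16,,)(op1, op2)  ->  svsubwb(op1, op2)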

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwt.c
new file mode 100644
index 000000000000..08b9e97ed9a0
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_subwt.c
@@ -0,0 +1,139 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint16_t test_svsubwt_s16(svint16_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssubwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_s16'}}
+  return SVE_ACLE_FUNC(svsubwt,_s16,,)(op1, op2);
+}
+
+svint32_t test_svsubwt_s32(svint32_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssubwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_s32'}}
+  return SVE_ACLE_FUNC(svsubwt,_s32,,)(op1, op2);
+}
+
+svint64_t test_svsubwt_s64(svint64_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssubwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_s64'}}
+  return SVE_ACLE_FUNC(svsubwt,_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsubwt_u16(svuint16_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usubwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %op2)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_u16'}}
+  return SVE_ACLE_FUNC(svsubwt,_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsubwt_u32(svuint32_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usubwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %op2)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_u32'}}
+  return SVE_ACLE_FUNC(svsubwt,_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsubwt_u64(svuint64_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usubwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %op2)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_u64'}}
+  return SVE_ACLE_FUNC(svsubwt,_u64,,)(op1, op2);
+}
+
+svint16_t test_svsubwt_n_s16(svint16_t op1, int8_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.ssubwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_n_s16'}}
+  return SVE_ACLE_FUNC(svsubwt,_n_s16,,)(op1, op2);
+}
+
+svint32_t test_svsubwt_n_s32(svint32_t op1, int16_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ssubwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_n_s32'}}
+  return SVE_ACLE_FUNC(svsubwt,_n_s32,,)(op1, op2);
+}
+
+svint64_t test_svsubwt_n_s64(svint64_t op1, int32_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.ssubwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_n_s64'}}
+  return SVE_ACLE_FUNC(svsubwt,_n_s64,,)(op1, op2);
+}
+
+svuint16_t test_svsubwt_n_u16(svuint16_t op1, uint8_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.usubwt.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_n_u16'}}
+  return SVE_ACLE_FUNC(svsubwt,_n_u16,,)(op1, op2);
+}
+
+svuint32_t test_svsubwt_n_u32(svuint32_t op1, uint16_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usubwt.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_n_u32'}}
+  return SVE_ACLE_FUNC(svsubwt,_n_u32,,)(op1, op2);
+}
+
+svuint64_t test_svsubwt_n_u64(svuint64_t op1, uint32_t op2)
+{
+  // CHECK-LABEL: test_svsubwt_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.usubwt.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svsubwt'}}
+  // expected-warning@+1 {{implicit declaration of function 'svsubwt_n_u64'}}
+  return SVE_ACLE_FUNC(svsubwt,_n_u64,,)(op1, op2);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalb.c
new file mode 100644
index 000000000000..01eca35e4685
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalb.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svmlalb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svmlalb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_s64,,)(op1, op2, op3, -1);
+}
+
+svuint32_t test_svmlalb_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_u32,,)(op1, op2, op3, 8);
+}
+
+svuint64_t test_svmlalb_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlalb_lane,_u64,,)(op1, op2, op3, 4);
+}
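
The ranges in these diagnostics follow from the element width of the narrow operands: the lane index selects one element from each 128-bit segment of the indexed vector operand, giving 8 candidate lanes for 16-bit elements ([0, 7]) and 4 for 32-bit elements ([0, 3]). A minimal in-range counterpart to the tests above (illustrative only; requires <arm_sve.h> and SVE2):

  svint32_t mlalb_lane_in_range(svint32_t op1, svint16_t op2, svint16_t op3) {
    // 7 is the highest valid index for 16-bit source elements.
    return svmlalb_lane_s32(op1, op2, op3, 7);
  }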

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalt.c
new file mode 100644
index 000000000000..52ebade2dd74
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlalt.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svmlalt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svmlalt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_s64,,)(op1, op2, op3, -1);
+}
+
+svuint32_t test_svmlalt_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_u32,,)(op1, op2, op3, 8);
+}
+
+svuint64_t test_svmlalt_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlalt_lane,_u64,,)(op1, op2, op3, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslb.c
new file mode 100644
index 000000000000..f27e09476b80
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslb.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svmlslb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svmlslb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_s64,,)(op1, op2, op3, -1);
+}
+
+svuint32_t test_svmlslb_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_u32,,)(op1, op2, op3, 8);
+}
+
+svuint64_t test_svmlslb_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlslb_lane,_u64,,)(op1, op2, op3, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslt.c
new file mode 100644
index 000000000000..f774436d2933
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mlslt.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svmlslt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svmlslt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_s64,,)(op1, op2, op3, -1);
+}
+
+svuint32_t test_svmlslt_lane_u32(svuint32_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_u32,,)(op1, op2, op3, 8);
+}
+
+svuint64_t test_svmlslt_lane_u64(svuint64_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmlslt_lane,_u64,,)(op1, op2, op3, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullb.c
new file mode 100644
index 000000000000..010e969d5a58
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullb.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svmullb_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmullb_lane,_s32,,)(op1, op2, -1);
+}
+
+svint64_t test_svmullb_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmullb_lane,_s64,,)(op1, op2, 4);
+}
+
+svuint32_t test_svmullb_lane_u32(svuint16_t op1, svuint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmullb_lane,_u32,,)(op1, op2, 8);
+}
+
+svuint64_t test_svmullb_lane_u64(svuint32_t op1, svuint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmullb_lane,_u64,,)(op1, op2, -1);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullt.c
new file mode 100644
index 000000000000..3dca8dd37e34
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_mullt.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svmullt_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmullt_lane,_s32,,)(op1, op2, 8);
+}
+
+svint64_t test_svmullt_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmullt_lane,_s64,,)(op1, op2, -1);
+}
+
+svuint32_t test_svmullt_lane_u32(svuint16_t op1, svuint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svmullt_lane,_u32,,)(op1, op2, -1);
+}
+
+svuint64_t test_svmullt_lane_u64(svuint32_t op1, svuint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svmullt_lane,_u64,,)(op1, op2, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalb.c
new file mode 100644
index 000000000000..fc40b7010ceb
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalb.c
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svqdmlalb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svqdmlalb_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svqdmlalb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svqdmlalb_lane,_s64,,)(op1, op2, op3, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalt.c
new file mode 100644
index 000000000000..32d28bfaf87a
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlalt.c
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svqdmlalt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svqdmlalt_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svqdmlalt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svqdmlalt_lane,_s64,,)(op1, op2, op3, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslb.c
new file mode 100644
index 000000000000..6e5203d3c45d
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslb.c
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svqdmlslb_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svqdmlslb_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svqdmlslb_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svqdmlslb_lane,_s64,,)(op1, op2, op3, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslt.c
new file mode 100644
index 000000000000..f47dc027c00d
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmlslt.c
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svqdmlslt_lane_s32(svint32_t op1, svint16_t op2, svint16_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svqdmlslt_lane,_s32,,)(op1, op2, op3, -1);
+}
+
+svint64_t test_svqdmlslt_lane_s64(svint64_t op1, svint32_t op2, svint32_t op3)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svqdmlslt_lane,_s64,,)(op1, op2, op3, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullb.c
new file mode 100644
index 000000000000..f40c221f286a
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullb.c
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svqdmullb_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svqdmullb_lane,_s32,,)(op1, op2, -1);
+}
+
+svint64_t test_svqdmullb_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svqdmullb_lane,_s64,,)(op1, op2, 4);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullt.c
new file mode 100644
index 000000000000..85b96b4ef777
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_qdmullt.c
@@ -0,0 +1,23 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint32_t test_svqdmullt_lane_s32(svint16_t op1, svint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svqdmullt_lane,_s32,,)(op1, op2, -1);
+}
+
+svint64_t test_svqdmullt_lane_s64(svint32_t op1, svint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 3]}}
+  return SVE_ACLE_FUNC(svqdmullt_lane,_s64,,)(op1, op2, -1);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllb.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllb.c
new file mode 100644
index 000000000000..79a7b5929937
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllb.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint16_t test_svshllb_n_s16(svint8_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svshllb,_n_s16,,)(op1, -1);
+}
+
+svint32_t test_svshllb_n_s32(svint16_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}}
+  return SVE_ACLE_FUNC(svshllb,_n_s32,,)(op1, -1);
+}
+
+svint64_t test_svshllb_n_s64(svint32_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+  return SVE_ACLE_FUNC(svshllb,_n_s64,,)(op1, -1);
+}
+
+svuint16_t test_svshllb_n_u16(svuint8_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svshllb,_n_u16,,)(op1, -1);
+}
+
+svuint32_t test_svshllb_n_u32(svuint16_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}}
+  return SVE_ACLE_FUNC(svshllb,_n_u32,,)(op1, -1);
+}
+
+svuint64_t test_svshllb_n_u64(svuint32_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+  return SVE_ACLE_FUNC(svshllb,_n_u64,,)(op1, -1);
+}
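
For the shllb forms (and shllt below) the immediate is a left-shift amount, and the valid range runs from 0 to one less than the source element width: [0, 7] for 8-bit, [0, 15] for 16-bit and [0, 31] for 32-bit inputs. A minimal in-range counterpart (illustrative only; requires <arm_sve.h> and SVE2):

  svint32_t shllb_in_range(svint16_t op1) {
    // 15 is the largest shift for 16-bit elements widened to 32 bits.
    return svshllb_n_s32(op1, 15);
  }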

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllt.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllt.c
new file mode 100644
index 000000000000..e989544048f5
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_shllt.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint16_t test_svshllt_n_s16(svint8_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svshllt,_n_s16,,)(op1, -1);
+}
+
+svint32_t test_svshllt_n_s32(svint16_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}}
+  return SVE_ACLE_FUNC(svshllt,_n_s32,,)(op1, -1);
+}
+
+svint64_t test_svshllt_n_s64(svint32_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+  return SVE_ACLE_FUNC(svshllt,_n_s64,,)(op1, -1);
+}
+
+svuint16_t test_svshllt_n_u16(svuint8_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 7]}}
+  return SVE_ACLE_FUNC(svshllt,_n_u16,,)(op1, -1);
+}
+
+svuint32_t test_svshllt_n_u32(svuint16_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 15]}}
+  return SVE_ACLE_FUNC(svshllt,_n_u32,,)(op1, -1);
+}
+
+svuint64_t test_svshllt_n_u64(svuint32_t op1)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [0, 31]}}
+  return SVE_ACLE_FUNC(svshllt,_n_u64,,)(op1, -1);
+}


        

