[clang] 36aab0c - [SveEmitter] Add builtins for SVE2 Bitwise ternary logical instructions

Sander de Smalen via cfe-commits cfe-commits@lists.llvm.org
Thu May 7 07:22:05 PDT 2020


Author: Sander de Smalen
Date: 2020-05-07T15:21:37+01:00
New Revision: 36aab0c055a93d95e84606368e088bd1559e4ccb

URL: https://github.com/llvm/llvm-project/commit/36aab0c055a93d95e84606368e088bd1559e4ccb
DIFF: https://github.com/llvm/llvm-project/commit/36aab0c055a93d95e84606368e088bd1559e4ccb.diff

LOG: [SveEmitter] Add builtins for SVE2 Bitwise ternary logical instructions

This patch adds builtins for:
- svbcax
- svbsl
- svbsl1n
- svbsl2n
- sveor3
- svnbsl
- svxar

Added: 
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bcax.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl1n.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl2n.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_eor3.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_nbsl.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_xar.c
    clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_xar.c

Modified: 
    clang/include/clang/Basic/arm_sve.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index a187591cd4d1..efdc892eb66a 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -1360,6 +1360,27 @@ def SVADALP_U_X : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeAny,  "aarch64_s
 def SVADALP_U_Z : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeZero, "aarch64_sve_uadalp">;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Bitwise ternary logical instructions
+//
+
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVBCAX  : SInst<"svbcax[_{d}]",  "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax">;
+def SVBSL   : SInst<"svbsl[_{d}]",   "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl">;
+def SVBSL1N : SInst<"svbsl1n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n">;
+def SVBSL2N : SInst<"svbsl2n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n">;
+def SVEOR3  : SInst<"sveor3[_{d}]",  "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3">;
+def SVNBSL  : SInst<"svnbsl[_{d}]",  "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl">;
+
+def SVBCAX_N  : SInst<"svbcax[_n_{d}]",  "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax">;
+def SVBSL_N   : SInst<"svbsl[_n_{d}]",   "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl">;
+def SVBSL1N_N : SInst<"svbsl1n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n">;
+def SVBSL2N_N : SInst<"svbsl2n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n">;
+def SVEOR3_N  : SInst<"sveor3[_n_{d}]",  "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3">;
+def SVNBSL_N  : SInst<"svnbsl[_n_{d}]",  "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl">;
+def SVXAR_N   : SInst<"svxar[_n_{d}]",   "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_xar", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 - Non-temporal gather/scatter
 let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bcax.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bcax.c
new file mode 100644
index 000000000000..3cde14ad65df
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bcax.c
@@ -0,0 +1,181 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svbcax_s8(svint8_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svbcax_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bcax.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_s8'}}
+  return SVE_ACLE_FUNC(svbcax,_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbcax_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svbcax_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bcax.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_s16'}}
+  return SVE_ACLE_FUNC(svbcax,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbcax_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svbcax_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bcax.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_s32'}}
+  return SVE_ACLE_FUNC(svbcax,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbcax_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+  // CHECK-LABEL: test_svbcax_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_s64'}}
+  return SVE_ACLE_FUNC(svbcax,_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbcax_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svbcax_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bcax.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_u8'}}
+  return SVE_ACLE_FUNC(svbcax,_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbcax_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svbcax_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bcax.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_u16'}}
+  return SVE_ACLE_FUNC(svbcax,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbcax_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svbcax_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bcax.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_u32'}}
+  return SVE_ACLE_FUNC(svbcax,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbcax_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+  // CHECK-LABEL: test_svbcax_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_u64'}}
+  return SVE_ACLE_FUNC(svbcax,_u64,,)(op1, op2, op3);
+}
+
+svint8_t test_svbcax_n_s8(svint8_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bcax.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_s8'}}
+  return SVE_ACLE_FUNC(svbcax,_n_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbcax_n_s16(svint16_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bcax.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_s16'}}
+  return SVE_ACLE_FUNC(svbcax,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbcax_n_s32(svint32_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bcax.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_s32'}}
+  return SVE_ACLE_FUNC(svbcax,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbcax_n_s64(svint64_t op1, svint64_t op2, int64_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_s64'}}
+  return SVE_ACLE_FUNC(svbcax,_n_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbcax_n_u8(svuint8_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bcax.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_u8'}}
+  return SVE_ACLE_FUNC(svbcax,_n_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbcax_n_u16(svuint16_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bcax.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_u16'}}
+  return SVE_ACLE_FUNC(svbcax,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbcax_n_u32(svuint32_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bcax.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_u32'}}
+  return SVE_ACLE_FUNC(svbcax,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbcax_n_u64(svuint64_t op1, svuint64_t op2, uint64_t op3)
+{
+  // CHECK-LABEL: test_svbcax_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bcax.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbcax'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbcax_n_u64'}}
+  return SVE_ACLE_FUNC(svbcax,_n_u64,,)(op1, op2, op3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl.c
new file mode 100644
index 000000000000..982d04b7d7cf
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl.c
@@ -0,0 +1,181 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svbsl_s8(svint8_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_s8'}}
+  return SVE_ACLE_FUNC(svbsl,_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbsl_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_s16'}}
+  return SVE_ACLE_FUNC(svbsl,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbsl_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_s32'}}
+  return SVE_ACLE_FUNC(svbsl,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbsl_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_s64'}}
+  return SVE_ACLE_FUNC(svbsl,_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbsl_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_u8'}}
+  return SVE_ACLE_FUNC(svbsl,_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbsl_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_u16'}}
+  return SVE_ACLE_FUNC(svbsl,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbsl_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_u32'}}
+  return SVE_ACLE_FUNC(svbsl,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbsl_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_u64'}}
+  return SVE_ACLE_FUNC(svbsl,_u64,,)(op1, op2, op3);
+}
+
+svint8_t test_svbsl_n_s8(svint8_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_s8'}}
+  return SVE_ACLE_FUNC(svbsl,_n_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbsl_n_s16(svint16_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_s16'}}
+  return SVE_ACLE_FUNC(svbsl,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbsl_n_s32(svint32_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_s32'}}
+  return SVE_ACLE_FUNC(svbsl,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbsl_n_s64(svint64_t op1, svint64_t op2, int64_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_s64'}}
+  return SVE_ACLE_FUNC(svbsl,_n_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbsl_n_u8(svuint8_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_u8'}}
+  return SVE_ACLE_FUNC(svbsl,_n_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbsl_n_u16(svuint16_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_u16'}}
+  return SVE_ACLE_FUNC(svbsl,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbsl_n_u32(svuint32_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_u32'}}
+  return SVE_ACLE_FUNC(svbsl,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbsl_n_u64(svuint64_t op1, svuint64_t op2, uint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl_n_u64'}}
+  return SVE_ACLE_FUNC(svbsl,_n_u64,,)(op1, op2, op3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl1n.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl1n.c
new file mode 100644
index 000000000000..85c0951693e9
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl1n.c
@@ -0,0 +1,181 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svbsl1n_s8(svint8_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl1n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_s8'}}
+  return SVE_ACLE_FUNC(svbsl1n,_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbsl1n_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl1n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_s16'}}
+  return SVE_ACLE_FUNC(svbsl1n,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbsl1n_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl1n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_s32'}}
+  return SVE_ACLE_FUNC(svbsl1n,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbsl1n_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl1n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_s64'}}
+  return SVE_ACLE_FUNC(svbsl1n,_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbsl1n_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl1n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_u8'}}
+  return SVE_ACLE_FUNC(svbsl1n,_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbsl1n_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl1n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_u16'}}
+  return SVE_ACLE_FUNC(svbsl1n,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbsl1n_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl1n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_u32'}}
+  return SVE_ACLE_FUNC(svbsl1n,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbsl1n_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl1n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_u64'}}
+  return SVE_ACLE_FUNC(svbsl1n,_u64,,)(op1, op2, op3);
+}
+
+svint8_t test_svbsl1n_n_s8(svint8_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl1n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_n_s8'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbsl1n_n_s16(svint16_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl1n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning@+1 {{implicit declaration of function 'svbsl1n_n_s16'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbsl1n_n_s32(svint32_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl1n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl1n_n_s32'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbsl1n_n_s64(svint64_t op1, svint64_t op2, int64_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl1n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl1n_n_s64'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbsl1n_n_u8(svuint8_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl1n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl1n_n_u8'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbsl1n_n_u16(svuint16_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl1n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl1n_n_u16'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbsl1n_n_u32(svuint32_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl1n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl1n_n_u32'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbsl1n_n_u64(svuint64_t op1, svuint64_t op2, uint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl1n_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl1n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl1n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl1n_n_u64'}}
+  return SVE_ACLE_FUNC(svbsl1n,_n_u64,,)(op1, op2, op3);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl2n.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl2n.c
new file mode 100644
index 000000000000..a47dc2d190fe
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_bsl2n.c
@@ -0,0 +1,181 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svbsl2n_s8(svint8_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl2n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_s8'}}
+  return SVE_ACLE_FUNC(svbsl2n,_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbsl2n_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl2n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_s16'}}
+  return SVE_ACLE_FUNC(svbsl2n,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbsl2n_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl2n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_s32'}}
+  return SVE_ACLE_FUNC(svbsl2n,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbsl2n_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl2n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_s64'}}
+  return SVE_ACLE_FUNC(svbsl2n,_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbsl2n_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl2n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_u8'}}
+  return SVE_ACLE_FUNC(svbsl2n,_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbsl2n_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl2n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_u16'}}
+  return SVE_ACLE_FUNC(svbsl2n,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbsl2n_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl2n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_u32'}}
+  return SVE_ACLE_FUNC(svbsl2n,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbsl2n_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl2n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_u64'}}
+  return SVE_ACLE_FUNC(svbsl2n,_u64,,)(op1, op2, op3);
+}
+
+svint8_t test_svbsl2n_n_s8(svint8_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl2n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_s8'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svbsl2n_n_s16(svint16_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl2n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_s16'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svbsl2n_n_s32(svint32_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl2n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_s32'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svbsl2n_n_s64(svint64_t op1, svint64_t op2, int64_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl2n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_s64'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svbsl2n_n_u8(svuint8_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.bsl2n.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_u8'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svbsl2n_n_u16(svuint16_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.bsl2n.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_u16'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svbsl2n_n_u32(svuint32_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.bsl2n.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_u32'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svbsl2n_n_u64(svuint64_t op1, svuint64_t op2, uint64_t op3)
+{
+  // CHECK-LABEL: test_svbsl2n_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.bsl2n.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svbsl2n'}}
+  // expected-warning at +1 {{implicit declaration of function 'svbsl2n_n_u64'}}
+  return SVE_ACLE_FUNC(svbsl2n,_n_u64,,)(op1, op2, op3);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_eor3.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_eor3.c
new file mode 100644
index 000000000000..634cb6537595
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_eor3.c
@@ -0,0 +1,181 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_sveor3_s8(svint8_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_sveor3_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_s8'}}
+  return SVE_ACLE_FUNC(sveor3,_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_sveor3_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_sveor3_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor3.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_s16'}}
+  return SVE_ACLE_FUNC(sveor3,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_sveor3_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_sveor3_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor3.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_s32'}}
+  return SVE_ACLE_FUNC(sveor3,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_sveor3_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+  // CHECK-LABEL: test_sveor3_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor3.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_s64'}}
+  return SVE_ACLE_FUNC(sveor3,_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_sveor3_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_sveor3_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_u8'}}
+  return SVE_ACLE_FUNC(sveor3,_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_sveor3_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_sveor3_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor3.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_u16'}}
+  return SVE_ACLE_FUNC(sveor3,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_sveor3_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_sveor3_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor3.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_u32'}}
+  return SVE_ACLE_FUNC(sveor3,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_sveor3_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+  // CHECK-LABEL: test_sveor3_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor3.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_u64'}}
+  return SVE_ACLE_FUNC(sveor3,_u64,,)(op1, op2, op3);
+}
+
+svint8_t test_sveor3_n_s8(svint8_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_s8'}}
+  return SVE_ACLE_FUNC(sveor3,_n_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_sveor3_n_s16(svint16_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor3.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_s16'}}
+  return SVE_ACLE_FUNC(sveor3,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_sveor3_n_s32(svint32_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor3.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_s32'}}
+  return SVE_ACLE_FUNC(sveor3,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_sveor3_n_s64(svint64_t op1, svint64_t op2, int64_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor3.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_s64'}}
+  return SVE_ACLE_FUNC(sveor3,_n_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_sveor3_n_u8(svuint8_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_u8'}}
+  return SVE_ACLE_FUNC(sveor3,_n_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_sveor3_n_u16(svuint16_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.eor3.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_u16'}}
+  return SVE_ACLE_FUNC(sveor3,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_sveor3_n_u32(svuint32_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.eor3.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_u32'}}
+  return SVE_ACLE_FUNC(sveor3,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_sveor3_n_u64(svuint64_t op1, svuint64_t op2, uint64_t op3)
+{
+  // CHECK-LABEL: test_sveor3_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.eor3.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'sveor3'}}
+  // expected-warning at +1 {{implicit declaration of function 'sveor3_n_u64'}}
+  return SVE_ACLE_FUNC(sveor3,_n_u64,,)(op1, op2, op3);
+}

diff  --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_nbsl.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_nbsl.c
new file mode 100644
index 000000000000..e20ca8299055
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_nbsl.c
@@ -0,0 +1,181 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svnbsl_s8(svint8_t op1, svint8_t op2, svint8_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.nbsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_s8'}}
+  return SVE_ACLE_FUNC(svnbsl,_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svnbsl_s16(svint16_t op1, svint16_t op2, svint16_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.nbsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_s16'}}
+  return SVE_ACLE_FUNC(svnbsl,_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svnbsl_s32(svint32_t op1, svint32_t op2, svint32_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.nbsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_s32'}}
+  return SVE_ACLE_FUNC(svnbsl,_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svnbsl_s64(svint64_t op1, svint64_t op2, svint64_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.nbsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_s64'}}
+  return SVE_ACLE_FUNC(svnbsl,_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svnbsl_u8(svuint8_t op1, svuint8_t op2, svuint8_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.nbsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %op3)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_u8'}}
+  return SVE_ACLE_FUNC(svnbsl,_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svnbsl_u16(svuint16_t op1, svuint16_t op2, svuint16_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.nbsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %op3)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_u16'}}
+  return SVE_ACLE_FUNC(svnbsl,_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svnbsl_u32(svuint32_t op1, svuint32_t op2, svuint32_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.nbsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %op3)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_u32'}}
+  return SVE_ACLE_FUNC(svnbsl,_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svnbsl_u64(svuint64_t op1, svuint64_t op2, svuint64_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.nbsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %op3)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_u64'}}
+  return SVE_ACLE_FUNC(svnbsl,_u64,,)(op1, op2, op3);
+}
+
+svint8_t test_svnbsl_n_s8(svint8_t op1, svint8_t op2, int8_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.nbsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning at +2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning at +1 {{implicit declaration of function 'svnbsl_n_s8'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_s8,,)(op1, op2, op3);
+}
+
+svint16_t test_svnbsl_n_s16(svint16_t op1, svint16_t op2, int16_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.nbsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svnbsl_n_s16'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_s16,,)(op1, op2, op3);
+}
+
+svint32_t test_svnbsl_n_s32(svint32_t op1, svint32_t op2, int32_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.nbsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svnbsl_n_s32'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_s32,,)(op1, op2, op3);
+}
+
+svint64_t test_svnbsl_n_s64(svint64_t op1, svint64_t op2, int64_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.nbsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svnbsl_n_s64'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_s64,,)(op1, op2, op3);
+}
+
+svuint8_t test_svnbsl_n_u8(svuint8_t op1, svuint8_t op2, uint8_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.nbsl.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svnbsl_n_u8'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_u8,,)(op1, op2, op3);
+}
+
+svuint16_t test_svnbsl_n_u16(svuint16_t op1, svuint16_t op2, uint16_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.nbsl.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svnbsl_n_u16'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_u16,,)(op1, op2, op3);
+}
+
+svuint32_t test_svnbsl_n_u32(svuint32_t op1, svuint32_t op2, uint32_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.nbsl.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svnbsl_n_u32'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_u32,,)(op1, op2, op3);
+}
+
+svuint64_t test_svnbsl_n_u64(svuint64_t op1, svuint64_t op2, uint64_t op3)
+{
+  // CHECK-LABEL: test_svnbsl_n_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op3)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.nbsl.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svnbsl'}}
+  // expected-warning@+1 {{implicit declaration of function 'svnbsl_n_u64'}}
+  return SVE_ACLE_FUNC(svnbsl,_n_u64,,)(op1, op2, op3);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_xar.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_xar.c
new file mode 100644
index 000000000000..5cd7cf0c4fd4
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/acle_sve2_xar.c
@@ -0,0 +1,173 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svxar_n_s8(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.xar.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s8'}}
+  return SVE_ACLE_FUNC(svxar,_n_s8,,)(op1, op2, 1);
+}
+
+svint8_t test_svxar_n_s8_1(svint8_t op1, svint8_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s8_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.xar.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s8'}}
+  return SVE_ACLE_FUNC(svxar,_n_s8,,)(op1, op2, 8);
+}
+
+svint16_t test_svxar_n_s16(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.xar.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s16'}}
+  return SVE_ACLE_FUNC(svxar,_n_s16,,)(op1, op2, 1);
+}
+
+svint16_t test_svxar_n_s16_1(svint16_t op1, svint16_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s16_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.xar.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s16'}}
+  return SVE_ACLE_FUNC(svxar,_n_s16,,)(op1, op2, 16);
+}
+
+svint32_t test_svxar_n_s32(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.xar.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s32'}}
+  return SVE_ACLE_FUNC(svxar,_n_s32,,)(op1, op2, 1);
+}
+
+svint32_t test_svxar_n_s32_1(svint32_t op1, svint32_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.xar.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s32'}}
+  return SVE_ACLE_FUNC(svxar,_n_s32,,)(op1, op2, 32);
+}
+
+svint64_t test_svxar_n_s64(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.xar.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s64'}}
+  return SVE_ACLE_FUNC(svxar,_n_s64,,)(op1, op2, 1);
+}
+
+svint64_t test_svxar_n_s64_1(svint64_t op1, svint64_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_s64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.xar.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_s64'}}
+  return SVE_ACLE_FUNC(svxar,_n_s64,,)(op1, op2, 64);
+}
+
+svuint8_t test_svxar_n_u8(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.xar.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 1)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u8'}}
+  return SVE_ACLE_FUNC(svxar,_n_u8,,)(op1, op2, 1);
+}
+
+svuint8_t test_svxar_n_u8_1(svuint8_t op1, svuint8_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u8_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.xar.nxv16i8(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %op2, i32 8)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u8'}}
+  return SVE_ACLE_FUNC(svxar,_n_u8,,)(op1, op2, 8);
+}
+
+svuint16_t test_svxar_n_u16(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.xar.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 1)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u16'}}
+  return SVE_ACLE_FUNC(svxar,_n_u16,,)(op1, op2, 1);
+}
+
+svuint16_t test_svxar_n_u16_1(svuint16_t op1, svuint16_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u16_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.xar.nxv8i16(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %op2, i32 16)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u16'}}
+  return SVE_ACLE_FUNC(svxar,_n_u16,,)(op1, op2, 16);
+}
+
+svuint32_t test_svxar_n_u32(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.xar.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 1)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u32'}}
+  return SVE_ACLE_FUNC(svxar,_n_u32,,)(op1, op2, 1);
+}
+
+svuint32_t test_svxar_n_u32_1(svuint32_t op1, svuint32_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u32_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.xar.nxv4i32(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %op2, i32 32)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u32'}}
+  return SVE_ACLE_FUNC(svxar,_n_u32,,)(op1, op2, 32);
+}
+
+svuint64_t test_svxar_n_u64(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.xar.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 1)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u64'}}
+  return SVE_ACLE_FUNC(svxar,_n_u64,,)(op1, op2, 1);
+}
+
+svuint64_t test_svxar_n_u64_1(svuint64_t op1, svuint64_t op2)
+{
+  // CHECK-LABEL: test_svxar_n_u64_1
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.xar.nxv2i64(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %op2, i32 64)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svxar'}}
+  // expected-warning@+1 {{implicit declaration of function 'svxar_n_u64'}}
+  return SVE_ACLE_FUNC(svxar,_n_u64,,)(op1, op2, 64);
+}

diff --git a/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_xar.c b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_xar.c
new file mode 100644
index 000000000000..2d96ab558781
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve2-intrinsics/negative/acle_sve2_xar.c
@@ -0,0 +1,83 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -fsyntax-only -verify %s
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+#include <arm_sve.h>
+
+svint8_t test_svxar_n_s8(svint8_t op1, svint8_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+  return SVE_ACLE_FUNC(svxar,_n_s8,,)(op1, op2, 0);
+}
+
+svint8_t test_svxar_n_s8_1(svint8_t op1, svint8_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+  return SVE_ACLE_FUNC(svxar,_n_s8,,)(op1, op2, 9);
+}
+
+svint16_t test_svxar_n_s16(svint16_t op1, svint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svxar,_n_s16,,)(op1, op2, 0);
+}
+
+svint16_t test_svxar_n_s16_1(svint16_t op1, svint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svxar,_n_s16,,)(op1, op2, 17);
+}
+
+svint32_t test_svxar_n_s32(svint32_t op1, svint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+  return SVE_ACLE_FUNC(svxar,_n_s32,,)(op1, op2, 0);
+}
+
+svint32_t test_svxar_n_s32_1(svint32_t op1, svint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+  return SVE_ACLE_FUNC(svxar,_n_s32,,)(op1, op2, 33);
+}
+
+svint64_t test_svxar_n_s64(svint64_t op1, svint64_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}}
+  return SVE_ACLE_FUNC(svxar,_n_s64,,)(op1, op2, 0);
+}
+
+svint64_t test_svxar_n_s64_1(svint64_t op1, svint64_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}}
+  return SVE_ACLE_FUNC(svxar,_n_s64,,)(op1, op2, 65);
+}
+
+svuint8_t test_svxar_n_u8(svuint8_t op1, svuint8_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 8]}}
+  return SVE_ACLE_FUNC(svxar,_n_u8,,)(op1, op2, 0);
+}
+
+svuint16_t test_svxar_n_u16(svuint16_t op1, svuint16_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 16]}}
+  return SVE_ACLE_FUNC(svxar,_n_u16,,)(op1, op2, 0);
+}
+
+svuint32_t test_svxar_n_u32(svuint32_t op1, svuint32_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 32]}}
+  return SVE_ACLE_FUNC(svxar,_n_u32,,)(op1, op2, 0);
+}
+
+svuint64_t test_svxar_n_u64(svuint64_t op1, svuint64_t op2)
+{
+  // expected-error-re@+1 {{argument value {{[0-9]+}} is outside the valid range [1, 64]}}
+  return SVE_ACLE_FUNC(svxar,_n_u64,,)(op1, op2, 0);
+}


        


More information about the cfe-commits mailing list